diff --git a/spack.yaml b/spack.yaml index bc8338b4..ece1b606 100644 --- a/spack.yaml +++ b/spack.yaml @@ -22,7 +22,7 @@ spack: - "armadillo" - "tbb" - "eigen" - - "meteoio" + - "meteoio@2.11.0" - "func@2.2.0: ~openmp" - "trilinos@15: +mpi" - "jemalloc" diff --git a/src/modules/snowpack.cpp b/src/modules/snowpack.cpp index a7f5685a..4af767d2 100644 --- a/src/modules/snowpack.cpp +++ b/src/modules/snowpack.cpp @@ -76,6 +76,13 @@ Lehning_snowpack::Lehning_snowpack(config_file cfg) provides("MS_WATER"); provides("MS_TOTALMASS"); provides("MS_SOIL_RUNOFF"); + + provides("snow_sphericity"); + provides("snow_grain_size"); + provides("frac_ice_content"); + provides("Sliq"); + provides("Tsnow"); + provides("snow_density"); } @@ -190,7 +197,7 @@ void Lehning_snowpack::run(mesh_elem &face) Mdata.elev = (*face)["solar_el"_s]*mio::Cst::to_rad; data.cum_precip += Mdata.psum; //running sum of the precip. snowpack removes the rain component for us. - data.meteo->compMeteo(Mdata,*(data.Xdata),false); // no canopy model + data.meteo->compMeteo(Mdata,*(data.Xdata),false,false); // no canopy model double mass_erode = 0; @@ -240,6 +247,12 @@ void Lehning_snowpack::run(mesh_elem &face) } } + // Perform snow preparation (grooming) if enabled + if(data.is_grooming) + { + data.sp->snowPreparation(Mdata.date, *(data.Xdata)); + } + (*face)["sublimation"_s]=surface_fluxes.mass[SurfaceFluxes::MS_SUBLIMATION]; @@ -273,6 +286,25 @@ void Lehning_snowpack::run(mesh_elem &face) (*face)["snow_albedo"_s]=data.Xdata->Albedo; //even if we have a measured albedo, Xdata will reflect this. 
//surface_fluxes.pAlbedo); // } + // New parameters + if(data.Xdata->getNumberOfElements() > 0) + { + // Top element is at index nElems-1 (elements are ordered from ground (0) to surface) + const auto& top_element = data.Xdata->Edata[data.Xdata->getNumberOfElements() - 1]; + (*face)["snow_sphericity"_s] = top_element.sp; + (*face)["snow_grain_size"_s] = top_element.rg; + (*face)["frac_ice_content"_s] = top_element.theta[ICE]; // ICE is defined as 1 in DataClasses.h + (*face)["Sliq"_s] = top_element.theta[WATER]; // WATER is defined as 2 in DataClasses.h + (*face)["snow_density"_s] = top_element.Rho; // Snow density [kg/m3] + } + + if(data.Xdata->getNumberOfNodes() > 0) + { + // Top node is at index nNodes-1 (nodes are ordered from ground (0) to surface) + const auto& top_node = data.Xdata->Ndata[data.Xdata->getNumberOfNodes() - 1]; + (*face)["Tsnow"_s] = top_node.T - mio::Cst::t_water_freezing_pt; // Convert from K to °C + } + } else{ set_all_nan_on_skip(face); @@ -350,6 +382,35 @@ void Lehning_snowpack::init(mesh& domain) //Only works when COMBINE_ELEMENTS == TRUE. 
d.config.addKey("REDUCE_N_ELEMENTS","SnowpackAdvanced","true"); + double grooming_week_start = cfg.get("GROOMING_WEEK_START", 40); //First week of grooming + double grooming_week_end = cfg.get("GROOMING_WEEK_END", 17); //Last week of grooming + double grooming_hour = cfg.get("GROOMING_HOUR", 21); //Hour at which grooming is performed + double grooming_depth_start = cfg.get("GROOMING_DEPTH_START", 0.4); // How much snow on the ground to start grooming + double grooming_depth_impact = cfg.get("GROOMING_DEPTH_IMPACT", 0.4); //Maximum depth of snow impacted by grooming + // Check for grooming parameter in domain - if true, enable SNOW_GROOMING + d.is_grooming = false; + if(face->has_parameter("grooming")){ + d.is_grooming = face->parameter("grooming"_s); + } else if(face->has_parameter("Resort")){ // Check if the Resort parameter exists and use it to look up grooming in the parameter mapping (similar to landcover for SimpleCanopy; see the SnowCast config) + int resort_type = face->parameter("Resort"_s); + try { + d.is_grooming = global_param->parameters.get("Resort." 
+ std::to_string(resort_type) + ".grooming"); + } catch(const boost::property_tree::ptree_bad_path& e) { + SPDLOG_ERROR("No grooming parameter defined for Resort type: {}", resort_type); + } + } + + if(d.is_grooming) + { + d.config.addKey("SNOW_GROOMING","TechSnow","true"); + d.config.addKey("GROOMING_WEEK_START","TechSnow",std::to_string(grooming_week_start)); + d.config.addKey("GROOMING_WEEK_END","TechSnow",std::to_string(grooming_week_end)); + d.config.addKey("GROOMING_HOUR","TechSnow",std::to_string(grooming_hour)); + d.config.addKey("GROOMING_DEPTH_START","TechSnow",std::to_string(grooming_depth_start)); + d.config.addKey("GROOMING_DEPTH_IMPACT","TechSnow",std::to_string(grooming_depth_impact)); + } + + // because we use our own config, we need to do the conversion //format is same key-val pairs that snowpack expects, case sensitive @@ -442,3 +503,627 @@ void Lehning_snowpack::init(mesh& domain) } } + +// Maximum number of snow layers to support in checkpoint +// Based on SmetIO.cc writesnowcover/readsnowcover variables +static const size_t MAX_LAYERS = 200; + +void Lehning_snowpack::checkpoint(mesh& domain, netcdf& chkpt) +{ + auto& nc = chkpt.get_ncfile(); + size_t nFaces = domain->size_local_faces(); + + // First pass: find the actual maximum number of layers across all triangles + size_t maxLayers = 0; + for (size_t i = 0; i < nFaces; i++) + { + auto face = domain->face(i); + auto& d = face->get_module_data(ID); + auto& Xdata = *(d.Xdata); + + size_t nElems = Xdata.getNumberOfElements(); + size_t nNodes = Xdata.getNumberOfNodes(); + maxLayers = std::max(maxLayers, nElems); + maxLayers = std::max(maxLayers, nNodes); + } + + maxLayers = std::max(size_t(1), std::min(maxLayers, MAX_LAYERS)); + + // Create dimensions + netCDF::NcDim triDim; + netCDF::NcDim layerDim; + try { + triDim = nc.addDim("snowpack_tri_id", nFaces); + } catch(netCDF::exceptions::NcNameInUse&) { + triDim = nc.getDim("snowpack_tri_id"); + } + try { + layerDim = nc.addDim("snowpack_layer", 
maxLayers); + } catch(netCDF::exceptions::NcNameInUse&) { + layerDim = nc.getDim("snowpack_layer"); + } + + std::vector dims2D = {triDim, layerDim}; + std::vector dims1D = {triDim}; + + // Helper lambda to create 1D variable + auto create1D = [&](const std::string& name) { + try { + nc.addVar(name, netCDF::ncDouble, dims1D); + } catch(netCDF::exceptions::NcNameInUse&) {} + }; + + // Helper lambda to create 2D variable + auto create2D = [&](const std::string& name) { + try { + nc.addVar(name, netCDF::ncDouble, dims2D); + } catch(netCDF::exceptions::NcNameInUse&) {} + }; + + // Create scalar variables (1D - one per triangle) - SmetIO style + create1D("snowpack:nElems"); + create1D("snowpack:nNodes"); + create1D("snowpack:Albedo"); + create1D("snowpack:pAlbedo"); + create1D("snowpack:swe"); + create1D("snowpack:cH"); + create1D("snowpack:mH"); + create1D("snowpack:Ground"); + create1D("snowpack:mass_sum"); + create1D("snowpack:lwc_sum"); + create1D("snowpack:ColdContent"); + create1D("snowpack:dIntEnergy"); + create1D("snowpack:ErosionMass"); + create1D("snowpack:cum_precip"); + create1D("snowpack:sum_subl"); + create1D("snowpack:hn"); + create1D("snowpack:rho_hn"); + + // Create element data variables (2D - per triangle per layer) - Exact SmetIO fields + create2D("snowpack:E_L"); // Layer_Thick [m] + create2D("snowpack:E_Te"); // Layer temperature [K] + create2D("snowpack:E_theta_ICE"); // Vol_Frac_I - ice content [0-1] + create2D("snowpack:E_theta_i_reservoir"); // Vol_Frac_IR - ice reservoir [0-1] + create2D("snowpack:E_theta_i_reservoir_cumul"); // Vol_Frac_CIR - cumulative ice reservoir [0-1] + create2D("snowpack:E_theta_WATER"); // Vol_Frac_W - liquid water [0-1] + create2D("snowpack:E_theta_WATER_PREF"); // Vol_Frac_WP - preferential flow water [0-1] + create2D("snowpack:E_theta_AIR"); // Vol_Frac_V - voids/air [0-1] + create2D("snowpack:E_theta_SOIL"); // Vol_Frac_S - soil content [0-1] + create2D("snowpack:E_soil_rho"); // Rho_S - soil density [kg/m3] + 
create2D("snowpack:E_soil_k"); // Conduc_S - soil conductivity [W/(mK)] + create2D("snowpack:E_soil_c"); // HeatCapac_S - soil heat capacity [J/(kgK)] + create2D("snowpack:E_rg"); // rg - grain radius [mm] + create2D("snowpack:E_rb"); // rb - bond radius [mm] + create2D("snowpack:E_dd"); // dd - dendricity [0-1] + create2D("snowpack:E_sp"); // sp - sphericity [0-1] + create2D("snowpack:E_mk"); // mk - grain marker + create2D("snowpack:E_CDot"); // CDot - stress rate [Pa/s] + create2D("snowpack:E_metamo"); // metamo - metamorphism state + create2D("snowpack:E_dsm"); // dsm - dry snow metamorphism (NIED) + create2D("snowpack:E_salinity"); // Sal - salinity [PSU] + create2D("snowpack:E_h"); // h - capillary pressure head [m] + create2D("snowpack:E_depositionDate"); // depositionDate - layer deposition date as Julian date + + // Create node data variables (2D - per triangle per node) - Exact SmetIO fields + create2D("snowpack:N_T"); // Temperature [K] + create2D("snowpack:N_hoar"); // mass_hoar - surface hoar mass + + double fillValue = mio::IOUtils::nodata; + + // Allocate buffers using the actual maxLayers, not MAX_LAYERS + std::vector buf_nElems(nFaces); + std::vector buf_nNodes(nFaces); + std::vector buf_Albedo(nFaces); + std::vector buf_pAlbedo(nFaces); + std::vector buf_swe(nFaces); + std::vector buf_cH(nFaces); + std::vector buf_mH(nFaces); + std::vector buf_Ground(nFaces); + std::vector buf_mass_sum(nFaces); + std::vector buf_lwc_sum(nFaces); + std::vector buf_ColdContent(nFaces); + std::vector buf_dIntEnergy(nFaces); + std::vector buf_ErosionMass(nFaces); + std::vector buf_cum_precip(nFaces); + std::vector buf_sum_subl(nFaces); + std::vector buf_hn(nFaces); + std::vector buf_rho_hn(nFaces); + + size_t total2D = nFaces * maxLayers; + std::vector buf_E_L(total2D, fillValue); + std::vector buf_E_Te(total2D, fillValue); + std::vector buf_E_theta_ICE(total2D, fillValue); + std::vector buf_E_theta_i_reservoir(total2D, fillValue); + std::vector 
buf_E_theta_i_reservoir_cumul(total2D, fillValue); + std::vector buf_E_theta_WATER(total2D, fillValue); + std::vector buf_E_theta_WATER_PREF(total2D, fillValue); + std::vector buf_E_theta_AIR(total2D, fillValue); + std::vector buf_E_theta_SOIL(total2D, fillValue); + std::vector buf_E_soil_rho(total2D, fillValue); + std::vector buf_E_soil_k(total2D, fillValue); + std::vector buf_E_soil_c(total2D, fillValue); + std::vector buf_E_rg(total2D, fillValue); + std::vector buf_E_rb(total2D, fillValue); + std::vector buf_E_dd(total2D, fillValue); + std::vector buf_E_sp(total2D, fillValue); + std::vector buf_E_mk(total2D, fillValue); + std::vector buf_E_CDot(total2D, fillValue); + std::vector buf_E_metamo(total2D, fillValue); + std::vector buf_E_dsm(total2D, fillValue); + std::vector buf_E_salinity(total2D, fillValue); + std::vector buf_E_h(total2D, fillValue); + std::vector buf_E_depositionDate(total2D, fillValue); + + std::vector buf_N_T(total2D, fillValue); + std::vector buf_N_hoar(total2D, fillValue); + + // Fill buffers from mesh data + for (size_t i = 0; i < nFaces; i++) + { + auto face = domain->face(i); + auto& d = face->get_module_data(ID); + auto& Xdata = *(d.Xdata); + + size_t nElems = Xdata.getNumberOfElements(); + size_t nNodes = Xdata.getNumberOfNodes(); + + buf_nElems[i] = static_cast(nElems); + buf_nNodes[i] = static_cast(nNodes); + buf_Albedo[i] = Xdata.Albedo; + buf_pAlbedo[i] = Xdata.pAlbedo; + buf_swe[i] = Xdata.swe; + buf_cH[i] = Xdata.cH; + buf_mH[i] = Xdata.mH; + buf_Ground[i] = Xdata.Ground; + buf_mass_sum[i] = Xdata.mass_sum; + buf_lwc_sum[i] = Xdata.lwc_sum; + buf_ColdContent[i] = Xdata.ColdContent; + buf_dIntEnergy[i] = Xdata.dIntEnergy; + buf_ErosionMass[i] = Xdata.ErosionMass; + buf_cum_precip[i] = d.cum_precip; + buf_sum_subl[i] = d.sum_subl; + buf_hn[i] = Xdata.hn; + buf_rho_hn[i] = Xdata.rho_hn; + + // Fill element data - Exact SmetIO fields + for (size_t e = 0; e < nElems && e < maxLayers; e++) + { + size_t idx = i * maxLayers + e; + auto& 
elem = Xdata.Edata[e]; + buf_E_L[idx] = elem.L; + buf_E_Te[idx] = Xdata.Ndata[e+1].T; // Note: SmetIO uses node T for layer temperature + buf_E_theta_ICE[idx] = elem.theta[ICE]; + buf_E_theta_i_reservoir[idx] = elem.theta_i_reservoir; + buf_E_theta_i_reservoir_cumul[idx] = elem.theta_i_reservoir_cumul; + buf_E_theta_WATER[idx] = elem.theta[WATER]; + buf_E_theta_WATER_PREF[idx] = elem.theta[WATER_PREF]; + buf_E_theta_AIR[idx] = elem.theta[AIR]; + buf_E_theta_SOIL[idx] = elem.theta[SOIL]; + buf_E_soil_rho[idx] = elem.soil[SOIL_RHO]; + buf_E_soil_k[idx] = elem.soil[SOIL_K]; + buf_E_soil_c[idx] = elem.soil[SOIL_C]; + buf_E_rg[idx] = elem.rg; + buf_E_rb[idx] = elem.rb; + buf_E_dd[idx] = elem.dd; + buf_E_sp[idx] = elem.sp; + buf_E_mk[idx] = static_cast(elem.mk); + buf_E_CDot[idx] = elem.CDot; + buf_E_metamo[idx] = elem.metamo; + buf_E_dsm[idx] = elem.dsm; + buf_E_salinity[idx] = elem.salinity; + buf_E_h[idx] = elem.h; + // Store depositionDate as Julian date; use nodata if undefined + buf_E_depositionDate[idx] = elem.depositionDate.isUndef() ? 
mio::IOUtils::nodata : elem.depositionDate.getJulian(); + } + + // Fill node data - Exact SmetIO fields + for (size_t n = 0; n < nNodes && n < maxLayers; n++) + { + size_t idx = i * maxLayers + n; + auto& node = Xdata.Ndata[n]; + buf_N_T[idx] = node.T; + buf_N_hoar[idx] = node.hoar; + } + } + + // Batch write all 1D variables + nc.getVar("snowpack:nElems").putVar(buf_nElems.data()); + nc.getVar("snowpack:nNodes").putVar(buf_nNodes.data()); + nc.getVar("snowpack:Albedo").putVar(buf_Albedo.data()); + nc.getVar("snowpack:pAlbedo").putVar(buf_pAlbedo.data()); + nc.getVar("snowpack:swe").putVar(buf_swe.data()); + nc.getVar("snowpack:cH").putVar(buf_cH.data()); + nc.getVar("snowpack:mH").putVar(buf_mH.data()); + nc.getVar("snowpack:Ground").putVar(buf_Ground.data()); + nc.getVar("snowpack:mass_sum").putVar(buf_mass_sum.data()); + nc.getVar("snowpack:lwc_sum").putVar(buf_lwc_sum.data()); + nc.getVar("snowpack:ColdContent").putVar(buf_ColdContent.data()); + nc.getVar("snowpack:dIntEnergy").putVar(buf_dIntEnergy.data()); + nc.getVar("snowpack:ErosionMass").putVar(buf_ErosionMass.data()); + nc.getVar("snowpack:cum_precip").putVar(buf_cum_precip.data()); + nc.getVar("snowpack:sum_subl").putVar(buf_sum_subl.data()); + nc.getVar("snowpack:hn").putVar(buf_hn.data()); + nc.getVar("snowpack:rho_hn").putVar(buf_rho_hn.data()); + + // Batch write all 2D variables - Exact SmetIO fields + nc.getVar("snowpack:E_L").putVar(buf_E_L.data()); + nc.getVar("snowpack:E_Te").putVar(buf_E_Te.data()); + nc.getVar("snowpack:E_theta_ICE").putVar(buf_E_theta_ICE.data()); + nc.getVar("snowpack:E_theta_i_reservoir").putVar(buf_E_theta_i_reservoir.data()); + nc.getVar("snowpack:E_theta_i_reservoir_cumul").putVar(buf_E_theta_i_reservoir_cumul.data()); + nc.getVar("snowpack:E_theta_WATER").putVar(buf_E_theta_WATER.data()); + nc.getVar("snowpack:E_theta_WATER_PREF").putVar(buf_E_theta_WATER_PREF.data()); + nc.getVar("snowpack:E_theta_AIR").putVar(buf_E_theta_AIR.data()); + 
nc.getVar("snowpack:E_theta_SOIL").putVar(buf_E_theta_SOIL.data()); + nc.getVar("snowpack:E_soil_rho").putVar(buf_E_soil_rho.data()); + nc.getVar("snowpack:E_soil_k").putVar(buf_E_soil_k.data()); + nc.getVar("snowpack:E_soil_c").putVar(buf_E_soil_c.data()); + nc.getVar("snowpack:E_rg").putVar(buf_E_rg.data()); + nc.getVar("snowpack:E_rb").putVar(buf_E_rb.data()); + nc.getVar("snowpack:E_dd").putVar(buf_E_dd.data()); + nc.getVar("snowpack:E_sp").putVar(buf_E_sp.data()); + nc.getVar("snowpack:E_mk").putVar(buf_E_mk.data()); + nc.getVar("snowpack:E_CDot").putVar(buf_E_CDot.data()); + nc.getVar("snowpack:E_metamo").putVar(buf_E_metamo.data()); + nc.getVar("snowpack:E_dsm").putVar(buf_E_dsm.data()); + nc.getVar("snowpack:E_salinity").putVar(buf_E_salinity.data()); + nc.getVar("snowpack:E_h").putVar(buf_E_h.data()); + nc.getVar("snowpack:E_depositionDate").putVar(buf_E_depositionDate.data()); + + nc.getVar("snowpack:N_T").putVar(buf_N_T.data()); + nc.getVar("snowpack:N_hoar").putVar(buf_N_hoar.data()); +} + +void Lehning_snowpack::load_checkpoint(mesh& domain, netcdf& chkpt) +{ + auto& nc = chkpt.get_ncfile(); + size_t nFaces = domain->size_local_faces(); + + // Get the actual layer dimension from the checkpoint file + size_t maxLayers = nc.getDim("snowpack_layer").getSize(); + size_t total2D = nFaces * maxLayers; + + // Allocate buffers for batch reading + std::vector buf_nElems(nFaces); + std::vector buf_nNodes(nFaces); + std::vector buf_Albedo(nFaces); + std::vector buf_pAlbedo(nFaces); + std::vector buf_swe(nFaces); + std::vector buf_cH(nFaces); + std::vector buf_mH(nFaces); + std::vector buf_Ground(nFaces); + std::vector buf_mass_sum(nFaces); + std::vector buf_lwc_sum(nFaces); + std::vector buf_ColdContent(nFaces); + std::vector buf_dIntEnergy(nFaces); + std::vector buf_ErosionMass(nFaces); + std::vector buf_cum_precip(nFaces); + std::vector buf_sum_subl(nFaces); + std::vector buf_hn(nFaces); + std::vector buf_rho_hn(nFaces); + + std::vector buf_E_L(total2D); + 
std::vector buf_E_Te(total2D); + std::vector buf_E_theta_ICE(total2D); + std::vector buf_E_theta_i_reservoir(total2D, 0.0); + std::vector buf_E_theta_i_reservoir_cumul(total2D, 0.0); + std::vector buf_E_theta_WATER(total2D); + std::vector buf_E_theta_WATER_PREF(total2D, 0.0); + std::vector buf_E_theta_AIR(total2D); + std::vector buf_E_theta_SOIL(total2D); + std::vector buf_E_soil_rho(total2D); + std::vector buf_E_soil_k(total2D); + std::vector buf_E_soil_c(total2D); + std::vector buf_E_rg(total2D); + std::vector buf_E_rb(total2D); + std::vector buf_E_dd(total2D); + std::vector buf_E_sp(total2D); + std::vector buf_E_mk(total2D); + std::vector buf_E_CDot(total2D, 0.0); + std::vector buf_E_metamo(total2D, 0.0); + std::vector buf_E_dsm(total2D, 0.0); + std::vector buf_E_salinity(total2D, 0.0); + std::vector buf_E_h(total2D, 0.0); + std::vector buf_E_depositionDate(total2D, mio::IOUtils::nodata); + + std::vector buf_N_T(total2D); + std::vector buf_N_hoar(total2D, 0.0); + + // Batch read all 1D variables + nc.getVar("snowpack:nElems").getVar(buf_nElems.data()); + nc.getVar("snowpack:nNodes").getVar(buf_nNodes.data()); + nc.getVar("snowpack:Albedo").getVar(buf_Albedo.data()); + nc.getVar("snowpack:pAlbedo").getVar(buf_pAlbedo.data()); + nc.getVar("snowpack:swe").getVar(buf_swe.data()); + nc.getVar("snowpack:cH").getVar(buf_cH.data()); + nc.getVar("snowpack:mH").getVar(buf_mH.data()); + nc.getVar("snowpack:Ground").getVar(buf_Ground.data()); + nc.getVar("snowpack:mass_sum").getVar(buf_mass_sum.data()); + nc.getVar("snowpack:lwc_sum").getVar(buf_lwc_sum.data()); + nc.getVar("snowpack:ColdContent").getVar(buf_ColdContent.data()); + nc.getVar("snowpack:dIntEnergy").getVar(buf_dIntEnergy.data()); + nc.getVar("snowpack:ErosionMass").getVar(buf_ErosionMass.data()); + nc.getVar("snowpack:cum_precip").getVar(buf_cum_precip.data()); + nc.getVar("snowpack:sum_subl").getVar(buf_sum_subl.data()); + nc.getVar("snowpack:hn").getVar(buf_hn.data()); + 
nc.getVar("snowpack:rho_hn").getVar(buf_rho_hn.data()); + + // Batch read all 2D variables - Exact SmetIO fields + nc.getVar("snowpack:E_L").getVar(buf_E_L.data()); + nc.getVar("snowpack:E_Te").getVar(buf_E_Te.data()); + nc.getVar("snowpack:E_theta_ICE").getVar(buf_E_theta_ICE.data()); + try { nc.getVar("snowpack:E_theta_i_reservoir").getVar(buf_E_theta_i_reservoir.data()); } catch(...) {} + try { nc.getVar("snowpack:E_theta_i_reservoir_cumul").getVar(buf_E_theta_i_reservoir_cumul.data()); } catch(...) {} + nc.getVar("snowpack:E_theta_WATER").getVar(buf_E_theta_WATER.data()); + try { nc.getVar("snowpack:E_theta_WATER_PREF").getVar(buf_E_theta_WATER_PREF.data()); } catch(...) {} + nc.getVar("snowpack:E_theta_AIR").getVar(buf_E_theta_AIR.data()); + nc.getVar("snowpack:E_theta_SOIL").getVar(buf_E_theta_SOIL.data()); + nc.getVar("snowpack:E_soil_rho").getVar(buf_E_soil_rho.data()); + nc.getVar("snowpack:E_soil_k").getVar(buf_E_soil_k.data()); + nc.getVar("snowpack:E_soil_c").getVar(buf_E_soil_c.data()); + nc.getVar("snowpack:E_rg").getVar(buf_E_rg.data()); + nc.getVar("snowpack:E_rb").getVar(buf_E_rb.data()); + nc.getVar("snowpack:E_dd").getVar(buf_E_dd.data()); + nc.getVar("snowpack:E_sp").getVar(buf_E_sp.data()); + nc.getVar("snowpack:E_mk").getVar(buf_E_mk.data()); + try { nc.getVar("snowpack:E_CDot").getVar(buf_E_CDot.data()); } catch(...) {} + try { nc.getVar("snowpack:E_metamo").getVar(buf_E_metamo.data()); } catch(...) {} + try { nc.getVar("snowpack:E_dsm").getVar(buf_E_dsm.data()); } catch(...) {} + try { nc.getVar("snowpack:E_salinity").getVar(buf_E_salinity.data()); } catch(...) {} + try { nc.getVar("snowpack:E_h").getVar(buf_E_h.data()); } catch(...) {} + try { nc.getVar("snowpack:E_depositionDate").getVar(buf_E_depositionDate.data()); } catch(...) {} + + nc.getVar("snowpack:N_T").getVar(buf_N_T.data()); + try { nc.getVar("snowpack:N_hoar").getVar(buf_N_hoar.data()); } catch(...) 
{} + + // Distribute data to mesh elements + for (size_t i = 0; i < nFaces; i++) + { + auto face = domain->face(i); + auto& d = face->get_module_data(ID); + auto& Xdata = *(d.Xdata); + + // Read scalar values from buffers + size_t nElems = static_cast(buf_nElems[i]); + size_t nNodes = static_cast(buf_nNodes[i]); + + // First, re-initialize the SnowStation with basic configuration (same as init()) + // This ensures all internal structures are properly set up + SN_SNOWSOIL_DATA SSdata; + SSdata.SoilAlb = cfg.get("sno.SoilAlbedo", 0.09); + SSdata.Albedo = SSdata.SoilAlb; + SSdata.BareSoil_z0 = cfg.get("sno.BareSoil_z0", 0.2); + if (SSdata.BareSoil_z0 == 0.) { + SSdata.BareSoil_z0 = 0.2; + } + SSdata.WindScalingFactor = cfg.get("sno.WindScalingFactor", 1); + SSdata.TimeCountDeltaHS = cfg.get("sno.TimeCountDeltaHS", 0.0); + SSdata.meta.stationName = cfg.get("sno.station_name", "chm"); + SSdata.meta.position.setAltitude(face->get_z()); + SSdata.meta.position.setXY(face->get_x(), face->get_y(), face->get_z()); + SSdata.meta.setSlope(mio::IOUtils::nodata, mio::IOUtils::nodata); + SSdata.HS_last = 0.; + SSdata.nN = 1; + SSdata.Height = 0.; + SSdata.nLayers = 0; + SSdata.Canopy_Height = cfg.get("sno.CanopyHeight", 0); + SSdata.Canopy_LAI = cfg.get("sno.CanopyLeafAreaIndex", 0); + SSdata.Canopy_Direct_Throughfall = cfg.get("sno.CanopyDirectThroughfall", 1); + SSdata.ErosionLevel = cfg.get("sno.ErosionLevel", 0); + + Xdata.initialize(SSdata, 0); + + // Now overwrite with checkpoint data + Xdata.Albedo = buf_Albedo[i]; + Xdata.pAlbedo = buf_pAlbedo[i]; + Xdata.swe = buf_swe[i]; + Xdata.cH = buf_cH[i]; + Xdata.mH = buf_mH[i]; + Xdata.Ground = buf_Ground[i]; + Xdata.mass_sum = buf_mass_sum[i]; + Xdata.lwc_sum = buf_lwc_sum[i]; + Xdata.ColdContent = buf_ColdContent[i]; + Xdata.dIntEnergy = buf_dIntEnergy[i]; + Xdata.ErosionMass = buf_ErosionMass[i]; + d.cum_precip = buf_cum_precip[i]; + d.sum_subl = buf_sum_subl[i]; + Xdata.hn = buf_hn[i]; + Xdata.rho_hn = buf_rho_hn[i]; + + // 
Resize the vectors to accommodate the loaded data + Xdata.resize(nElems); + + // Read element data from buffers - Exact SmetIO style + // Use nElems (per-triangle actual count), not maxLayers, to avoid reading nodata + for (size_t e = 0; e < nElems; e++) + { + size_t idx = i * maxLayers + e; + auto& elem = Xdata.Edata[e]; + + // Initialize all fields to reasonable defaults - matching SmetIO + elem.L = buf_E_L[idx]; + elem.L0 = elem.L; + elem.Te = buf_E_Te[idx]; + elem.gradT = 0.0; + elem.meltfreeze_tk = mio::Cst::t_water_freezing_pt; + elem.theta[ICE] = buf_E_theta_ICE[idx]; + elem.theta_i_reservoir = buf_E_theta_i_reservoir[idx]; + elem.theta_i_reservoir_cumul = buf_E_theta_i_reservoir_cumul[idx]; + elem.theta[WATER] = buf_E_theta_WATER[idx]; + elem.theta[WATER_PREF] = buf_E_theta_WATER_PREF[idx]; + elem.theta[AIR] = buf_E_theta_AIR[idx]; + elem.theta[SOIL] = buf_E_theta_SOIL[idx]; + elem.h = buf_E_h[idx]; + elem.soil[SOIL_RHO] = buf_E_soil_rho[idx]; + elem.soil[SOIL_K] = buf_E_soil_k[idx]; + elem.soil[SOIL_C] = buf_E_soil_c[idx]; + elem.Rho = (elem.theta[ICE] * 917.0) + (elem.theta[WATER] * 1000.0) + (elem.theta[SOIL] * elem.soil[SOIL_RHO]); + elem.M = elem.Rho * elem.L; + elem.sw_abs = 0.0; + elem.rg = buf_E_rg[idx]; + elem.rb = buf_E_rb[idx]; + elem.dd = buf_E_dd[idx]; + elem.sp = buf_E_sp[idx]; + elem.ogs = elem.rg; // Default to grain radius + elem.N3 = 4.0; // Typical coordination number + elem.mk = static_cast(buf_E_mk[idx] + 0.5); + elem.type = 0; + elem.metamo = buf_E_metamo[idx]; + elem.salinity = buf_E_salinity[idx]; + // Restore depositionDate from Julian date; keep as undefined (default) if nodata + if (buf_E_depositionDate[idx] != mio::IOUtils::nodata) { + elem.depositionDate.setDate(buf_E_depositionDate[idx], 0.0); + } + elem.dth_w = 0.0; + // res_wat_cont will be computed by snowResidualWaterContent() below + elem.Qmf = 0.0; + elem.QIntmf = 0.0; + elem.dEps = 0.0; + elem.Eps = 0.0; + elem.Eps_e = 0.0; + elem.Eps_v = 0.0; + elem.Eps_Dot = 0.0; + 
elem.Eps_vDot = 0.0; + elem.E = 0.0; + elem.S = 0.0; + elem.C = 0.0; + elem.CDot = buf_E_CDot[idx]; + elem.ps2rb = 0.0; + elem.s_strength = 0.0; + elem.hard = 0.0; + elem.S_dr = 0.0; + elem.crit_cut_length = 0.0; + elem.lwc_source = 0.0; + elem.PrefFlowArea = 0.0; + elem.theta_w_transfer = 0.0; + elem.SlopeParFlux = 0.0; + elem.Qph_up = 0.0; + elem.Qph_down = 0.0; + elem.dsm = buf_E_dsm[idx]; + elem.rime = 0.0; + elem.rhov = 0.0; + elem.Qmm = 0.0; + elem.vapTrans_fluxDiff = 0.0; + elem.vapTrans_snowDenChangeRate = 0.0; + elem.vapTrans_cumulativeDenChange = 0.0; + elem.vapTrans_underSaturationDegree = 0.0; + } + + // These are normally set in SnowStation::initialize() but need to be explicitly + // computed here after loading checkpoint data + Xdata.SoilNode = 0; // Will be computed below + for (size_t e = 0; e < nElems; e++) { + auto& elem = Xdata.Edata[e]; + + // Compute residual water content based on ice content (required for boundary conditions) + elem.snowResidualWaterContent(); + + // Compute heat capacity (required for thermal matrix) + elem.heatCapacity(); + + // Update density from volumetric contents (ensures consistency) + elem.updDensity(); + + // Compute mass from density and length + elem.M = elem.Rho * elem.L; + + // Count soil nodes for proper boundary condition handling + if (elem.theta[SOIL] > 0.0) { + Xdata.SoilNode++; + } + } + + // Read node data from buffers - Match SmetIO + for (size_t n = 0; n < nNodes; n++) + { + size_t idx = i * maxLayers + n; + auto& node = Xdata.Ndata[n]; + node.T = buf_N_T[idx]; + node.hoar = buf_N_hoar[idx]; + // Ensure all other fields are initialized (NodeData constructor already does this, but just in case) + node.z = 0.0; + node.u = 0.0; + node.f = 0.0; + node.udot = 0.0; + node.S_n = 0.0; + node.S_s = 0.0; + node.ssi = 6.0; // Max stability + node.dsm = 0.0; + node.S_dsm = 0.0; + node.Sigdsm = 0.0; + node.rime = 0.0; + node.water_flux = 0.0; + node.rhov = 0.0; + } + + // Ensure ground node has a valid temperature 
(not nodata) + // When there's no snow, the ground node temperature is critical for stability + if (Xdata.Ndata[0].T <= 0.0 || Xdata.Ndata[0].T > 400.0) { + SPDLOG_DEBUG("Face {}: Invalid ground node temperature {}, resetting to freezing point", + i, Xdata.Ndata[0].T); + Xdata.Ndata[0].T = mio::Cst::t_water_freezing_pt; + } + + // Ensure swe is consistent with nElems + // If there are no elements, swe should be 0 + if (nElems == 0 && Xdata.swe > 0.0) { + SPDLOG_DEBUG("Resetting swe from {} to 0 for face {} (no elements)", Xdata.swe, i); + Xdata.swe = 0.0; + } + + // Recompute node positions (z) from element thicknesses (L) + // This is necessary for proper thermal calculations + // Ndata[0] is at the bottom (ground), Ndata[nNodes-1] is at the surface + Xdata.Ndata[0].z = 0.0; // Ground level + double computed_cH = Xdata.Ground; // Start from ground level + for (size_t e = 0; e < nElems; e++) { + // Node e+1 is above element e + Xdata.Ndata[e + 1].z = Xdata.Ndata[e].z + Xdata.Edata[e].L; + computed_cH += Xdata.Edata[e].L; + } + // Ensure cH is consistent with the sum of element thicknesses plus ground + // Use computed value if there's a mismatch (indicates checkpoint data inconsistency) + if (nElems > 0 && std::abs(Xdata.cH - computed_cH) > 0.001) { + SPDLOG_DEBUG("Adjusting cH from {} to {} for face {} (checkpoint inconsistency)", + Xdata.cH, computed_cH, i); + Xdata.cH = computed_cH; + } + + // Synchronize element temperatures with node temperatures for consistency + // This ensures Te is the average of adjacent node temperatures + for (size_t e = 0; e < nElems; e++) { + Xdata.Edata[e].Te = (Xdata.Ndata[e].T + Xdata.Ndata[e + 1].T) / 2.; + } + + // Set output variables from loaded state + (*face)["swe"_s] = Xdata.swe; + (*face)["snowdepthavg"_s] = Xdata.cH - Xdata.Ground; + + if (Xdata.swe > 0) + { + double bulk_T_s = 0; + for (size_t e = 0; e < nElems; ++e) + { + bulk_T_s += Xdata.Edata[e].Te; + } + if (nElems > 0) + bulk_T_s /= nElems; + + (*face)["T_s"_s] = 
bulk_T_s - mio::Cst::t_water_freezing_pt; + (*face)["n_nodes"_s] = nNodes; + (*face)["n_elem"_s] = nElems; + } + else + { + (*face)["T_s"_s] = mio::IOUtils::nodata; + (*face)["n_nodes"_s] = 0; + (*face)["n_elem"_s] = 0; + } + + (*face)["mass_snowpack_removed"_s] = Xdata.ErosionMass; + (*face)["snow_albedo"_s] = Xdata.Albedo; + (*face)["sum_subl"_s] = d.sum_subl; + } + + SPDLOG_INFO("Snowpack checkpoint loaded successfully for {} faces", nFaces); +} diff --git a/src/modules/snowpack.hpp b/src/modules/snowpack.hpp index 12f6c2fb..685cd218 100644 --- a/src/modules/snowpack.hpp +++ b/src/modules/snowpack.hpp @@ -91,19 +91,32 @@ * - "MS_WATER" * - "MS_TOTALMASS" * - "MS_SOIL_RUNOFF" + * - Top layer Snow sphericity "snow_sphericity" [-] + * - Top layer Snow grain size "snow_grain_size" [mm] + * - Top layer Fraction of ice content "frac_ice_content" [-] + * - Top layer Liquid water content "Sliq" [-] + * - Snow surface temperature "Tsnow" [\f$ {}^\circ C \f$] + * - Top layer Snow density "snow_density" [\f$ kg \cdot m^{-3} \f$] * * **Configuration:** * \rst * .. 
code:: json * + * "Lehning_snowpack": * { - * "sno": - * { - * "SoilAlbedo": 0.09, - * "BareSoil_z0": 0.2, - * "WindScalingFactor": 1, - * "TimeCountDeltaHS": 0.0 * - * } + * "sno": + * { + * "SoilAlbedo": 0.09, + * "BareSoil_z0": 0.2, + * "WindScalingFactor": 1, + * "TimeCountDeltaHS": 0.0 + * }, + * "GROOMING_WEEK_START": 40, + * "GROOMING_WEEK_END": 17, + * "GROOMING_HOUR": 21, + * "GROOMING_DEPTH_START": 0.4, + * "GROOMING_DEPTH_IMPACT": 0.4 + * } * \endrst * @} */ @@ -119,6 +132,8 @@ REGISTER_MODULE_HPP(Lehning_snowpack); virtual void init(mesh& domain); + virtual void checkpoint(mesh& domain, netcdf& chkpt); + virtual void load_checkpoint(mesh& domain, netcdf& chkpt); struct data : public face_info { @@ -138,6 +153,7 @@ REGISTER_MODULE_HPP(Lehning_snowpack); double cum_precip; double sum_subl; + bool is_grooming; // Flag indicating if snow grooming is enabled for this face }; double sn_dt; // calculation step length diff --git a/third_party/snowpack/CMakeLists.txt b/third_party/snowpack/CMakeLists.txt index 62de3b02..0b03936a 100644 --- a/third_party/snowpack/CMakeLists.txt +++ b/third_party/snowpack/CMakeLists.txt @@ -15,16 +15,18 @@ IF(ENABLE_LAPACK) ENDIF(ENABLE_LAPACK) SET(snowpacklib_sources - DataClasses.cc - SnowpackConfig.cc - Meteo.cc - Saltation.cc - Laws_sn.cc - Utils.cc - StabilityAlgorithms.cc - Stability.cc - Hazard.cc - SnowDrift.cc + DataClasses.cc + vanGenuchten.cc + SnowpackConfig.cc + Meteo.cc + Saltation.cc + Laws_sn.cc + Utils.cc + StabilityAlgorithms.cc + Stability.cc + Hazard.cc + SnowDrift.cc + TechnicalSnow.cc ${plugins_sources} ${core_sources} ) diff --git a/third_party/snowpack/Constants.h b/third_party/snowpack/Constants.h index 1d4aa537..70199d09 100644 --- a/third_party/snowpack/Constants.h +++ b/third_party/snowpack/Constants.h @@ -40,9 +40,6 @@ #define SN_VERSION STR2( _VERSION ) #endif -/// @brief Initial value for stability parameter -#define INIT_STABILITY 999. 
- namespace Constants { const double undefined = -999.; /// #include @@ -47,8 +47,9 @@ unsigned short SnowStation::number_of_solutes = 0; const double SnowStation::thresh_moist_snow = 0.003; const double SnowStation::thresh_moist_soil = 0.0001; -/// Both elements must be smaller than COMB_THRESH_L (m) for an action to be taken -const double SnowStation::comb_thresh_l = 0.015; +/// The default ratio between height_new_elem and comb_thresh_l, in case comb_thresh_l is not explicitly defined. +const double SnowStation::comb_thresh_l_ratio = 0.75; + /// Volumetric ice content (1), i.e., about 46 kg m-3 const double SnowStation::comb_thresh_ice = 0.05; const double SnowStation::comb_thresh_water = 0.01; ///< Water content (1) @@ -57,12 +58,22 @@ const double SnowStation::comb_thresh_sp = 0.05; ///< Sphericity (1) const double SnowStation::comb_thresh_rg = 0.125; ///< Grain radius (mm) RunInfo::RunInfo() - : version(SN_VERSION), computation_date(getRunDate()), - compilation_date(getCompilationDate()), user(IOUtils::getLogName()) {} + : version(SN_VERSION), version_num( getNumericVersion(SN_VERSION) ), computation_date(getRunDate()), + compilation_date(getCompilationDate()), user(IOUtils::getLogName()), hostname(IOUtils::getHostName()) {} RunInfo::RunInfo(const RunInfo& orig) - : version(orig.version), computation_date(orig.computation_date), - compilation_date(orig.compilation_date), user(orig.user) {} + : version(orig.version), version_num(orig.version_num), computation_date(orig.computation_date), + compilation_date(orig.compilation_date), user(orig.user), hostname(orig.hostname) {} + +double RunInfo::getNumericVersion(std::string version_str) +{ + //remove any '-' used for formatting the date + version_str.erase(std::remove_if(version_str.begin(), version_str.end(), [] (char c) { return c=='-'; }), version_str.end()); + //keep only the first '.' 
and remove the other ones, if any + const size_t pos = version_str.find('.'); + version_str.erase(std::remove_if(version_str.begin()+pos+1, version_str.end(), [] (char c) { return c=='.'; }), version_str.end()); + return atof( version_str.c_str() ); +} mio::Date RunInfo::getRunDate() { @@ -86,7 +97,7 @@ void ZwischenData::reset() hn24.resize(144, 0.0); } -std::iostream& operator<<(std::iostream& os, const ZwischenData& data) +std::ostream& operator<<(std::ostream& os, const ZwischenData& data) { const size_t s_hoar24 = data.hoar24.size(); os.write(reinterpret_cast(&s_hoar24), sizeof(size_t)); @@ -100,7 +111,7 @@ std::iostream& operator<<(std::iostream& os, const ZwischenData& data) return os; } -std::iostream& operator>>(std::iostream& is, ZwischenData& data) +std::istream& operator>>(std::istream& is, ZwischenData& data) { size_t s_hoar24, s_hn3; is.read(reinterpret_cast(&s_hoar24), sizeof(size_t)); @@ -192,11 +203,11 @@ void SnowProfileLayer::generateLayer(const ElementData& Edata, const NodeData& N std::vector SnowProfileLayer::generateProfile(const mio::Date& dateOfProfile, const SnowStation& Xdata, const double hoar_density_surf, const double hoar_min_size_surf) { const size_t nE = Xdata.getNumberOfElements(); - const std::vector& NDS = Xdata.Ndata; - const std::vector& EMS = Xdata.Edata; + const vector& NDS = Xdata.Ndata; + const vector& EMS = Xdata.Edata; const double cos_sl = Xdata.cos_sl; const bool surf_hoar = (NDS[nE].hoar > (hoar_density_surf * MM_TO_M(hoar_min_size_surf))); - + // Generate the profile data from the element data (1 layer = 1 element) unsigned char snowloc = 0; string mystation = Xdata.meta.getStationID(); @@ -215,9 +226,9 @@ std::vector SnowProfileLayer::generateProfile(const mio::Date& Pdata[ll].stationname = mystation; Pdata[ll].loc_for_snow = snowloc; Pdata[ll].loc_for_wind = 1; - + // Write snow layer data - if (ll < nE) { + if (e < nE) { Pdata[ll].generateLayer(EMS[e], NDS[e+1]); } else { // add a SH layer 
Pdata[ll].generateLayer(EMS[nE-1], NDS[nE], dateOfProfile, hoar_density_surf); @@ -260,7 +271,7 @@ void SnowProfileLayer::average(const double& Lp0, const double& Lp1, const SnowP const std::string BoundCond::toString() const { - std::stringstream os; + std::ostringstream os; os << "\n"; os << "\tlw_out=" << lw_out << " lw_net=" << lw_net << "\n"; os << "\tQsensible=" << qs << " Qlatent=" << ql << " Qrain=" << qr << " Qgeo=" << qg << "\n"; @@ -268,6 +279,16 @@ const std::string BoundCond::toString() const return os.str(); } +void BoundCond::reset() +{ + lw_out = 0.; ///< outgoing longwave radiation + lw_net = 0.; ///< net longwave radiation + qs = 0.; ///< sensible heat + ql = 0.; ///< latent heat + qr = 0.; ///< rain energy + qg = 0.; ///< heat flux at lower boundary +} + SurfaceFluxes::SurfaceFluxes() : lw_in(0.), lw_out(0.), lw_net(0.), qs(0.), ql(0.), hoar(0.), qr(0.), qg(0.), qg0(0.), sw_hor(0.), sw_in(0.), sw_out(0.), qw(0.), sw_dir(0.), sw_diff(0.), pAlbedo(0.), mAlbedo(0.), dIntEnergy(0.), dIntEnergySoil(0.), meltFreezeEnergy(0.), meltFreezeEnergySoil(0.), @@ -356,7 +377,13 @@ void SurfaceFluxes::collectSurfaceFluxes(const BoundCond& Bdata, // 2) Long wave fluxes. lw_out += Bdata.lw_out; lw_net += Bdata.lw_net; - lw_in += (Bdata.lw_net + Bdata.lw_out); + if (Mdata.lw_net == IOUtils::nodata) { + // Default + lw_in += Atmosphere::blkBody_Radiation(Mdata.ea, Mdata.ta); + } else { + // NET_LW provided + lw_in += Bdata.lw_net + Bdata.lw_out; + } // 3) Turbulent fluxes. 
qs += Bdata.qs; @@ -375,23 +402,25 @@ void SurfaceFluxes::collectSurfaceFluxes(const BoundCond& Bdata, if(Xdata.SoilNode>0) { dIntEnergySoil += Xdata.dIntEnergySoil; // Now take care of the source and sink terms: - dIntEnergySoil += (mass[MS_SOIL_RUNOFF] * Constants::specific_heat_water * (Xdata.Edata[0].Te - Constants::melting_tk)); + dIntEnergySoil += (mass[MS_SOIL_RUNOFF] * Constants::specific_heat_water * (Xdata.Edata[0].Te - Constants::meltfreeze_tk)); if (Xdata.SoilNode < Xdata.getNumberOfElements()) { - dIntEnergySoil -= mass[MS_SNOWPACK_RUNOFF] * Constants::specific_heat_water * (Xdata.Edata[Xdata.SoilNode].Te - Constants::melting_tk); + dIntEnergySoil -= mass[MS_SNOWPACK_RUNOFF] * Constants::specific_heat_water * (Xdata.Edata[Xdata.SoilNode].Te - Constants::meltfreeze_tk); } if (Xdata.SoilNode == Xdata.getNumberOfElements()) { //Note: at this stage, MS_RAIN is still in kg/m^2! In Main.cc, it is recalculated to kg/m^2/h if PRECIP_RATES==TRUE. - dIntEnergySoil -= (mass[MS_RAIN] + mass[MS_EVAPORATION] + mass[MS_SUBLIMATION]) * Constants::specific_heat_water * (Xdata.Edata[Xdata.SoilNode-1].Te - Constants::melting_tk); + dIntEnergySoil -= (mass[MS_RAIN] + mass[MS_EVAPORATION] + mass[MS_SUBLIMATION]) * Constants::specific_heat_water * (Xdata.Edata[Xdata.SoilNode-1].Te - Constants::meltfreeze_tk); } meltFreezeEnergySoil += Xdata.meltFreezeEnergySoil; } // 6) Collect total masses of snowpack - mass[MS_TOTALMASS] = mass[MS_SWE] = mass[MS_WATER] = 0.; + mass[MS_TOTALMASS] = mass[MS_SWE] = mass[MS_WATER] = mass[MS_WATER_SOIL]= mass[MS_ICE_SOIL]= 0.; Xdata.compSnowpackMasses(); mass[MS_TOTALMASS] = Xdata.mass_sum; mass[MS_SWE] = Xdata.swe; mass[MS_WATER] = Xdata.lwc_sum; + mass[MS_WATER_SOIL] = Xdata.lwc_sum_soil; + mass[MS_ICE_SOIL] = Xdata.swc_sum_soil; } /** @@ -422,7 +451,7 @@ void SurfaceFluxes::multiplyFluxes(const double& factor) mAlbedo *= factor; } -std::iostream& operator<<(std::iostream& os, const SurfaceFluxes& data) +std::ostream& 
operator<<(std::ostream& os, const SurfaceFluxes& data) { os.write(reinterpret_cast(&data.lw_in), sizeof(data.lw_in)); os.write(reinterpret_cast(&data.lw_out), sizeof(data.lw_out)); @@ -462,7 +491,7 @@ std::iostream& operator<<(std::iostream& os, const SurfaceFluxes& data) return os; } -std::iostream& operator>>(std::iostream& is, SurfaceFluxes& data) +std::istream& operator>>(std::istream& is, SurfaceFluxes& data) { is.read(reinterpret_cast(&data.lw_in), sizeof(data.lw_in)); is.read(reinterpret_cast(&data.lw_out), sizeof(data.lw_out)); @@ -504,6 +533,231 @@ std::iostream& operator>>(std::iostream& is, SurfaceFluxes& data) return is; } + + +/** + * @brief Initialize all the CData elements value with values by default or + *with values read from the SNO file. + *This function is called in SnowStation::initialize, whch is called by XXX in the main(). + * @param SSdata soil data SN_SNOWSOIL_DATA& SSdata created in the main. + * @param useCanopyModel set to true if the canopy model is used + * @param isAlpine3D set to true when calling from Alpine3D in order to prevent the generation of warnings + * @author Adrien Michel + */ + +void CanopyData::initialize(const SN_SNOWSOIL_DATA& SSdata, const bool useCanopyModel, const bool isAlpine3D){ + + int_cap_snow = SSdata.Canopy_int_cap_snow; //iMax in Gouttevin,2015 + if(useCanopyModel && (int_cap_snow < 0.0 || int_cap_snow == mio::IOUtils::nodata )) + { + if(!isAlpine3D){ + std::stringstream msg; + msg << "Value provided for CanopySnowIntCapacity(" << int_cap_snow << ") in soil file is not valid, the default value of 5.9 sill be used."; + prn_msg(__FILE__, __LINE__, "wrn", Date(),msg.str().c_str()); + } + int_cap_snow = 5.9; + } + + /// Specific interception capacity for rain (I_LAI) (mm/LAI) + int_cap_rain = 0.3; + /** Coef in interception function, see (Pomeroy et al,1998) where a value of 0.7 was + * found to be appropriate for hourly time-step, but smaller time steps require smaller + * values, 0.5 was found reasoanble by 
using the SnowMIP2 data (2007-12-09) + */ + interception_timecoef = 0.5; + + /// RADIATION BALANCE + can_alb_dry = SSdata.Canopy_alb_dry; // Albedo of dry canopy (calibr: 0.09, Alptal) + if(useCanopyModel && (can_alb_dry < 0.0 || can_alb_dry> 1.0 || can_alb_dry == mio::IOUtils::nodata )) { + if(!isAlpine3D){ + std::stringstream msg; + msg << "Value provided for CanopyAlbedoDry (" << can_alb_dry << ") in soil file is not valid, the default value of 0.11 will be used."; + prn_msg(__FILE__, __LINE__, "wrn", Date(),msg.str().c_str()); + } + can_alb_dry = 0.11; + } + + can_alb_wet = SSdata.Canopy_alb_wet; // Albedo of wet canopy (calibr: 0.09, Alptal) + if(useCanopyModel && (can_alb_wet < 0.0 || can_alb_wet > 1.0 || can_alb_wet == mio::IOUtils::nodata )) { + if(!isAlpine3D){ + std::stringstream msg; + msg << "Value provided for CanopyAlbedoWet (" << can_alb_wet << ") in soil file is not valid, the default value of 0.11 will be used."; + prn_msg(__FILE__, __LINE__, "wrn", Date(),msg.str().c_str()); + } + can_alb_wet = 0.11; + } + + can_alb_snow = SSdata.Canopy_alb_snow; // Albedo of snow covered albedo (calibr: 0.35, Alptal) + if(useCanopyModel && (can_alb_snow < 0.0 || can_alb_snow > 1.0 || can_alb_snow == mio::IOUtils::nodata )) { + if(!isAlpine3D){ + std::stringstream msg; + msg << "Value provided for CanopyAlbedoSnow (" << can_alb_snow << ") in soil file is not valid, the default value of 0.35 will be used."; + prn_msg(__FILE__, __LINE__, "wrn", Date(),msg.str().c_str()); + } + can_alb_snow = 0.35; + } + + krnt_lai = .75; // Radiation transmissivity parameter, in the range 0.4-0.8 if the true LAI is used; higher if optical LAI is used. 
+ // (calibrated on Alptal) + can_diameter = SSdata.Canopy_diameter; // average canopy (tree) diameter [m], parameter in the new radiation transfer model + if(!isAlpine3D && useCanopyModel && (can_diameter < 0.0 || can_diameter == mio::IOUtils::nodata )) { + if(!isAlpine3D){ + std::stringstream msg; + msg << "Value provided for CanopyDiameter (" << can_diameter << ") in soil file is not valid, the default value of 1.0 will be used."; + prn_msg(__FILE__, __LINE__, "wrn", Date(),msg.str().c_str()); + } + can_diameter = 1.0; + } + + // ENERGY BALANCE + // parameters for HeatMass and 2layercanopy + biomass_heat_capacity = 2800.; // from Linroth et al., 2013 (J Kg-1 K-1) + biomass_density = 900.; // from Linroth et al., 2013 (Kg m-3) + + lai_frac_top_default = SSdata.Canopy_lai_frac_top_default; // fraction of total LAI that is attributed to the uppermost layer. Here calibrated for Alptal. + if(!isAlpine3D && useCanopyModel && (lai_frac_top_default < 0.0 || lai_frac_top_default > 1.0 || lai_frac_top_default == mio::IOUtils::nodata )) { + if(!isAlpine3D){ + std::stringstream msg; + msg << "Value provided for CanopyFracLAIUpperLayer (" << lai_frac_top_default << ") in soil file is not valid, the default value of 0.5 will be used."; + prn_msg(__FILE__, __LINE__, "wrn", Date(),msg.str().c_str()); + } + lai_frac_top_default = 0.5; + } + + trunk_frac_height = 0.2; // (optional) fraction of total tree height occupied by trunks, + // used to calculate direct solar insolation of trunks. + trunkalb = 0.09; // trunk albedo + et = 1.; // trunk emissivity + /// TURBULENT HEAT EXCHANGE + /// Stab. corr. aerodyn. resist. 
above and below canopy: 0=off and 1=on (Monin-Obukhov formulation) + canopy_stabilitycorrection = true; + /// Ratio between canopy height and roughness length + roughmom_to_canopyheight_ratio = 0.10; + /// As above for displacement height + displ_to_canopyheight_ratio = 0.6667; + /** + * Fractional increase of aerodynamic resistance for evaporation of intercepted snow. + * - 10.0 from Koivusalo and Kokkonen (2002) + * - 8.0 calibration with Alptal data + */ + raincrease_snow = 10.0; + + /// @brief Maximum allowed canopy temperature change (K hr-1) + canopytemp_maxchange_perhour = 7.0; + /// @brief (~=1, but Not allowed to be exactly 1) + roughheat_to_roughmom_ratio = 0.9999; + /// @brief minimum heat exchange (Wm-2K-1) at zero wind + can_ch0 = 3.; + /// @brief 1+CAN_RS_MULT = maximum factor to increase Cdata->rs below canopy + can_rs_mult = 3.0; + /// @brief TRANSPIRATION + /// @brief Minimum canopy surface resistance, 500 (sm-1) is for needle leaf treas van den Hurk et al (2000) *75% Gustafsson et al (2003) + rsmin = 375.0; + /** + * @brief gd (Pa-1) parameter for canopy surface resistance response to vapour pressure: + * - 0.0003 = trees (needle or broadleafs) + * - 0=crops, grass, tundra etc + */ + f3_gd = 0.0003; + /// @brief Root depth, determining the soil layers influenced by root water uptake + rootdepth = 1.0; + /// @brief Wilting point, defined as a fraction of water content at field capacity (-) + wp_fraction = 0.17; + /// @brief Wilting point pressure head, when using Richards equation for soil. 
+ h_wilt = -1.55E6; + //@} + + // INITIALIZE CANOPY DATA + // State variable + temp = 273.15; // temperature (K) + storage = 0.0; // intercepted water (kg m-2 or mm Water Equivalent) + ec = 1.0; ///< longwave emissivity (1) +// parameters + lai = SSdata.Canopy_LAI; + if(useCanopyModel && (lai < 0.0 || lai == mio::IOUtils::nodata )) { + std::stringstream msg; + msg << "Value provided for LAI (" << lai << ") in soil file is not valid."; + throw UnknownValueException(msg.str(), AT); + } + + z0m = height*0.1; + z0h = z0m*0.1; + zdispl = height*0.66; + + height = SSdata.Canopy_Height; + if(useCanopyModel && (height < 0.0 || height == mio::IOUtils::nodata )) { + std::stringstream msg; + msg << "Value provided for height (" << height << ") in soil file is not ialid."; + throw UnknownValueException(msg.str(), AT); + } + + direct_throughfall = SSdata.Canopy_Direct_Throughfall; + if(useCanopyModel && (direct_throughfall < 0.0 || direct_throughfall >1.0 || direct_throughfall == mio::IOUtils::nodata )) { + std::stringstream msg; + msg << "Value provided for direct throughfall (" << direct_throughfall << ") in soil file is not valid."; + throw UnknownValueException(msg.str(), AT); + } + + sigf= 1.-exp(-krnt_lai * (lai));; ///< radiation transmissivity (1) + + // aerodynamic resistances + ra = 0.; ///< from canopy air to reference height + rc = 0.; ///< from canopy to canopy air + rs = 0.; ///< from subsurface to canpopy air + rstransp = 0.; //< stomatal surface resistance for transpiration + // Averaged variables + canopyalb = can_alb_dry; ///< canopy albedo [-] + totalalb=0; ///< total albedo above canopy and snow/soil surface [-] + wetfraction = 0.; ///< fraction of canopy covered by interception [-] + intcapacity = 0.; ///< maximum interception storage [mm] + // Radiations + rswrac = 0.; ///< upward shortwave above canopy + iswrac = 0.; ///< downward shortwave radiation above canopy + rswrbc = 0.; ///< upward shortwave below canopy + iswrbc = 0.; ///< downward shortwave 
radiation below canopy + ilwrac = 0.; ///< downward longwave radiation ABOVE canopy + rlwrac = 0.; ///< upward longwave radiation ABOVE canopy + ilwrbc = 0.; ///< downward longwave radiation BELOW canopy + rlwrbc = 0.; ///< upward longwave radiation BELOW canopy + rsnet = 0.; ///< net shortwave radiation + rlnet = 0.; ///< net longwave radiation + // Turbulent fluxes + sensible = 0.; + latent = 0.; + latentcorr = 0.; + // Evap fluxes + transp = 0.; + intevap = 0.; + // Mass fluxes + interception = 0.; + throughfall = 0.; + snowunload = 0.; + + snowfac = 0.; ///< snowfall above canopy + rainfac = 0.; ///< rainfall above canopy + liquidfraction = 0.; + sigftrunk = 0.; ///< radiation interception cross section for trunk layer () + Ttrunk = 273.15; // trunk temperature (K) + CondFluxCanop = 0.; ///< biomass heat storage flux towards Canopy (if 1L) towards Leaves (if 2L). (>0 towards canopy) + CondFluxTrunks = 0.; ///< biomass heat storage flux towards Trunks (if 2L) + LWnet_Trunks = 0.; ///< net LW to trunks (>0 towards trunks) + SWnet_Trunks= 0.; ///< net SW to trunks (>0 towards trunks) + QStrunks = 0.; ///< sensible heat flux from trunks (>0 if heat lost from trunk) + forestfloor_alb = 0.; ///< albedo of the forest floor + BasalArea = SSdata.Canopy_BasalArea; ///< basal area of trees on the stand + if(useCanopyModel && (BasalArea < 0.0 || BasalArea == mio::IOUtils::nodata )) { + if(!isAlpine3D) { + std::stringstream msg; + msg << "Value provided for CanopyBasalArea (" << BasalArea << ") in soil file is not valid, the default value of 0.004 will be used."; + prn_msg(__FILE__, __LINE__, "wrn", Date(),msg.str().c_str()); + } + BasalArea=0.004; + } + + HMLeaves=3.*4190.; ///< Leaves heat mass (J K-1 /m2 ground surface) + HMTrunks=30.*4190.; ///< Trunks heat mass (J K-1 /m2 ground surface) +} + void CanopyData::reset(const bool& cumsum_mass) { if (cumsum_mass) { // Do not reset cumulated mass balance @@ -573,7 +827,7 @@ void CanopyData::multiplyFluxes(const double& 
factor) intcapacity *= factor; } -std::iostream& operator<<(std::iostream& os, const CanopyData& data) +std::ostream& operator<<(std::ostream& os, const CanopyData& data) { os.write(reinterpret_cast(&data.storage), sizeof(data.storage)); os.write(reinterpret_cast(&data.temp), sizeof(data.temp)); @@ -617,10 +871,53 @@ std::iostream& operator<<(std::iostream& os, const CanopyData& data) os.write(reinterpret_cast(&data.interception), sizeof(data.interception)); os.write(reinterpret_cast(&data.throughfall), sizeof(data.throughfall)); os.write(reinterpret_cast(&data.snowunload), sizeof(data.snowunload)); + + os.write(reinterpret_cast(&data.snowfac), sizeof(data.snowfac)); + os.write(reinterpret_cast(&data.rainfac), sizeof(data.rainfac)); + os.write(reinterpret_cast(&data.liquidfraction), sizeof(data.liquidfraction)); + os.write(reinterpret_cast(&data.sigftrunk), sizeof(data.sigftrunk)); + os.write(reinterpret_cast(&data.Ttrunk), sizeof(data.Ttrunk)); + os.write(reinterpret_cast(&data.CondFluxCanop), sizeof(data.CondFluxCanop)); + os.write(reinterpret_cast(&data.CondFluxTrunks), sizeof(data.CondFluxTrunks)); + os.write(reinterpret_cast(&data.LWnet_Trunks), sizeof(data.LWnet_Trunks)); + os.write(reinterpret_cast(&data.SWnet_Trunks), sizeof(data.SWnet_Trunks)); + os.write(reinterpret_cast(&data.QStrunks), sizeof(data.QStrunks)); + os.write(reinterpret_cast(&data.forestfloor_alb), sizeof(data.forestfloor_alb)); + os.write(reinterpret_cast(&data.BasalArea), sizeof(data.BasalArea)); + os.write(reinterpret_cast(&data.HMLeaves), sizeof(data.HMLeaves)); + os.write(reinterpret_cast(&data.HMTrunks), sizeof(data.HMTrunks)); + + os.write(reinterpret_cast< const char*>(&data.int_cap_snow), sizeof(data.int_cap_snow)); + os.write(reinterpret_cast< const char*>(&data.int_cap_rain), sizeof(data.int_cap_rain)); + os.write(reinterpret_cast< const char*>(&data.interception_timecoef), sizeof(data.interception_timecoef)); + os.write(reinterpret_cast(&data.can_alb_dry), 
sizeof(data.can_alb_dry)); + os.write(reinterpret_cast(&data.can_alb_wet), sizeof(data.can_alb_wet)); + os.write(reinterpret_cast(&data.can_alb_snow), sizeof(data.can_alb_snow)); + os.write(reinterpret_cast(&data.krnt_lai), sizeof(data.krnt_lai)); + os.write(reinterpret_cast(&data.can_diameter), sizeof(data.can_diameter)); + os.write(reinterpret_cast(&data.biomass_heat_capacity), sizeof(data.biomass_heat_capacity)); + os.write(reinterpret_cast(&data.lai_frac_top_default), sizeof(data.lai_frac_top_default)); + os.write(reinterpret_cast(&data.trunk_frac_height), sizeof(data.trunk_frac_height)); + os.write(reinterpret_cast(&data.trunkalb), sizeof(data.trunkalb)); + os.write(reinterpret_cast(&data.et), sizeof(data.et)); + os.write(reinterpret_cast(&data.canopy_stabilitycorrection), sizeof(data.canopy_stabilitycorrection)); + os.write(reinterpret_cast(&data.roughmom_to_canopyheight_ratio), sizeof(data.roughmom_to_canopyheight_ratio)); + os.write(reinterpret_cast(&data.displ_to_canopyheight_ratio), sizeof(data.displ_to_canopyheight_ratio)); + os.write(reinterpret_cast(&data.raincrease_snow), sizeof(data.raincrease_snow)); + os.write(reinterpret_cast(&data.canopytemp_maxchange_perhour), sizeof(data.canopytemp_maxchange_perhour)); + os.write(reinterpret_cast(&data.roughheat_to_roughmom_ratio), sizeof(data.roughheat_to_roughmom_ratio)); + os.write(reinterpret_cast(&data.can_ch0), sizeof(data.can_ch0)); + os.write(reinterpret_cast(&data.can_rs_mult), sizeof(data.can_rs_mult)); + os.write(reinterpret_cast(&data.rsmin), sizeof(data.rsmin)); + os.write(reinterpret_cast(&data.f3_gd), sizeof(data.f3_gd)); + os.write(reinterpret_cast(&data.rootdepth), sizeof(data.rootdepth)); + os.write(reinterpret_cast(&data.wp_fraction), sizeof(data.wp_fraction)); + os.write(reinterpret_cast(&data.h_wilt), sizeof(data.h_wilt)); + return os; } -std::iostream& operator>>(std::iostream& is, CanopyData& data) +std::istream& operator>>(std::istream& is, CanopyData& data) { 
is.read(reinterpret_cast(&data.storage), sizeof(data.storage)); is.read(reinterpret_cast(&data.temp), sizeof(data.temp)); @@ -664,47 +961,141 @@ std::iostream& operator>>(std::iostream& is, CanopyData& data) is.read(reinterpret_cast(&data.interception), sizeof(data.interception)); is.read(reinterpret_cast(&data.throughfall), sizeof(data.throughfall)); is.read(reinterpret_cast(&data.snowunload), sizeof(data.snowunload)); + + is.read(reinterpret_cast(&data.snowfac), sizeof(data.snowfac)); + is.read(reinterpret_cast(&data.rainfac), sizeof(data.rainfac)); + is.read(reinterpret_cast(&data.liquidfraction), sizeof(data.liquidfraction)); + is.read(reinterpret_cast(&data.sigftrunk), sizeof(data.sigftrunk)); + is.read(reinterpret_cast(&data.Ttrunk), sizeof(data.Ttrunk)); + is.read(reinterpret_cast(&data.CondFluxCanop), sizeof(data.CondFluxCanop)); + is.read(reinterpret_cast(&data.CondFluxTrunks), sizeof(data.CondFluxTrunks)); + is.read(reinterpret_cast(&data.LWnet_Trunks), sizeof(data.LWnet_Trunks)); + is.read(reinterpret_cast(&data.SWnet_Trunks), sizeof(data.SWnet_Trunks)); + is.read(reinterpret_cast(&data.QStrunks), sizeof(data.QStrunks)); + is.read(reinterpret_cast(&data.forestfloor_alb), sizeof(data.forestfloor_alb)); + is.read(reinterpret_cast(&data.BasalArea), sizeof(data.BasalArea)); + is.read(reinterpret_cast(&data.HMLeaves), sizeof(data.HMLeaves)); + is.read(reinterpret_cast(&data.HMTrunks), sizeof(data.HMTrunks)); + + is.read(reinterpret_cast(&data.int_cap_snow), sizeof(data.int_cap_snow)); + is.read(reinterpret_cast(&data.int_cap_rain), sizeof(data.int_cap_rain)); + is.read(reinterpret_cast(&data.interception_timecoef), sizeof(data.interception_timecoef)); + is.read(reinterpret_cast(&data.can_alb_dry), sizeof(data.can_alb_dry)); + is.read(reinterpret_cast(&data.can_alb_wet), sizeof(data.can_alb_wet)); + is.read(reinterpret_cast(&data.can_alb_snow), sizeof(data.can_alb_snow)); + is.read(reinterpret_cast(&data.krnt_lai), sizeof(data.krnt_lai)); + 
is.read(reinterpret_cast(&data.can_diameter), sizeof(data.can_diameter)); + is.read(reinterpret_cast(&data.biomass_heat_capacity), sizeof(data.biomass_heat_capacity)); + is.read(reinterpret_cast(&data.lai_frac_top_default), sizeof(data.lai_frac_top_default)); + is.read(reinterpret_cast(&data.trunk_frac_height), sizeof(data.trunk_frac_height)); + is.read(reinterpret_cast(&data.trunkalb), sizeof(data.trunkalb)); + is.read(reinterpret_cast(&data.et), sizeof(data.et)); + is.read(reinterpret_cast(&data.canopy_stabilitycorrection), sizeof(data.canopy_stabilitycorrection)); + is.read(reinterpret_cast(&data.roughmom_to_canopyheight_ratio), sizeof(data.roughmom_to_canopyheight_ratio)); + is.read(reinterpret_cast(&data.displ_to_canopyheight_ratio), sizeof(data.displ_to_canopyheight_ratio)); + is.read(reinterpret_cast(&data.raincrease_snow), sizeof(data.raincrease_snow)); + is.read(reinterpret_cast(&data.canopytemp_maxchange_perhour), sizeof(data.canopytemp_maxchange_perhour)); + is.read(reinterpret_cast(&data.roughheat_to_roughmom_ratio), sizeof(data.roughheat_to_roughmom_ratio)); + is.read(reinterpret_cast(&data.can_ch0), sizeof(data.can_ch0)); + is.read(reinterpret_cast(&data.can_rs_mult), sizeof(data.can_rs_mult)); + is.read(reinterpret_cast(&data.rsmin), sizeof(data.rsmin)); + is.read(reinterpret_cast(&data.f3_gd), sizeof(data.f3_gd)); + is.read(reinterpret_cast(&data.rootdepth), sizeof(data.rootdepth)); + is.read(reinterpret_cast(&data.wp_fraction), sizeof(data.wp_fraction)); + is.read(reinterpret_cast(&data.h_wilt), sizeof(data.h_wilt)); + return is; } const std::string CanopyData::toString() const { - std::stringstream os; + std::ostringstream os; os << "" << "\n"; - - os << "\t\n"; - os << "\tstorage: " << storage << "\n"; - os << "\ttemp: " << temp << "\n"; - os << "\tsigf: " << sigf << "\tec: " << ec << "\n"; - os << "\t\n\t\n"; - os << "\theight: " << height << "\n"; - os << "\tlai: " << lai << "\tdirect_throughfall: " << direct_throughfall << "\n"; - os << 
"\tz0m: " << z0m << "\tz0h: " << z0h << "\n"; - os << "\tzdispl: " << zdispl << "\n"; - os << "\t\n\t\n"; - os << "\tra: " << ra << " rc: " << rc << " rs: " << rs << "\n"; - os << "\trstransp: " << rstransp << "\n"; - os << "\t\n\t\n"; - os << "\tcanopyalb: " << canopyalb << " totalalb: " << totalalb << "\n"; - os << "\twetfraction: " << wetfraction << "\n"; - os << "\tintcapacity: " << intcapacity << "\n"; - os << "\t\n\t\n"; - os << "\trswrac: " << rswrac << " iswrac: " << iswrac << "\n"; - os << "\trswrbc: " << rswrbc << " iswrbc: " << iswrbc << "\n"; - os << "\tilwrac: " << ilwrac << " rlwrac: " << rlwrac << "\n"; - os << "\tilwrbc: " << ilwrbc << " rlwrbc: " << rlwrbc << "\n"; - os << "\trsnet: " << rsnet << " rlnet: " << rlnet << "\n"; - os << "\t\n\t\n"; - os << "\tsensible: " << sensible << "\n"; - os << "\tlatent: " << latent << " latentcorr: " << latentcorr << "\n"; - os << "\t\n\t\n"; - os << "\ttransp: " << transp << "\n"; - os << "\tintevap: " << intevap << "\n"; - os << "\t\n\t\n"; - os << "\tinterception: " << interception << "\n"; - os << "\tthroughfall: " << throughfall << "\n"; - os << "\tsnowunload: " << snowunload << "\n"; - os << "\t\n\n"; + os << "\tstorage: " << storage << "\n"; + os << "\ttemp: " << temp << "\n"; + os << "\tsigf: " << sigf << "\n"; + os << "\tec: " << ec << "\n"; + os << "\theight: " << height << "\n"; + os << "\tlai: " << lai << "\n"; + os << "\tdirect_throughfall: " << direct_throughfall << "\n"; + os << "\tz0m: " << z0m << "\n"; + os << "\tz0h: " << z0h << "\n"; + os << "\tzdispl: " << zdispl << "\n"; + os << "_____________________________________" << "\n"; + os << "\tra: " << ra << "\n"; + os << "\trc: " << rc << "\n"; + os << "\tdrs: " << rs << "\n"; + os << "\trstransp: " << rstransp << "\n"; + os << "\tcanopyalb: " << canopyalb << "\n"; + os << "\ttotalalb: " << totalalb << "\n"; + os << "\twetfraction: " << wetfraction << "\n"; + os << "\tintcapacity: " << intcapacity << "\n"; + os << "\trswrac: " << rswrac << "\n"; 
+ os << "\tiswrac: " << iswrac << "\n"; + os << "_____________________________________" << "\n"; + os << "\tiswrbc: " << iswrbc << "\n"; + os << "\tilwrac: " << ilwrac << "\n"; + os << "\trlwrac: " << rlwrac << "\n"; + os << "\tilwrbc: " << ilwrbc << "\n"; + os << "\trlwrbc: " << rlwrbc << "\n"; + os << "\trsnet: " << rsnet << "\n"; + os << "\trlnet: " << rlnet << "\n"; + os << "\tsensible: " << sensible << "\n"; + os << "\tlatent: " << latent << "\n"; + os << "\tlatentcorr: " << latentcorr << "\n"; + os << "_____________________________________" << "\n"; + os << "\ttransp: " << transp << "\n"; + os << "\tintevap: " << intevap << "\n"; + os << "\tinterception: " << interception << "\n"; + os << "\tthroughfall: " << throughfall << "\n"; + os << "\tsnowunload: " << snowunload << "\n"; + os << "\tint_cap_snow: " << int_cap_snow << "\n"; + os << "\tint_cap_rain: " << int_cap_rain << "\n"; + os << "\tinterception_timecoef: " << interception_timecoef << "\n"; + os << "\tcan_alb_dry: " << can_alb_dry << "\n"; + os << "\tcan_alb_wet: " << can_alb_wet << "\n"; + os << "_____________________________________" << "\n"; + os << "\tcan_alb_snow: " << can_alb_snow << "\n"; + os << "\tkrnt_lai: " << krnt_lai << "\n"; + os << "\tcan_diameter: " << can_diameter << "\n"; + os << "\tbiomass_heat_capacity: " << biomass_heat_capacity << "\n"; + os << "\tbiomass_density: " << biomass_density << "\n"; + os << "\tlai_frac_top_default: " << lai_frac_top_default << "\n"; + os << "\ttrunk_frac_height: " << trunk_frac_height << "\n"; + os << "\ttrunkalb: " << trunkalb << "\n"; + os << "\tet: " << et << "\n"; + os << "\tcanopy_stabilitycorrection: " << canopy_stabilitycorrection << "\n"; + os << "_____________________________________" << "\n"; + os << "\troughmom_to_canopyheight_ratio: " << roughmom_to_canopyheight_ratio << "\n"; + os << "\tdispl_to_canopyheight_ratio: " << displ_to_canopyheight_ratio << "\n"; + os << "\traincrease_snow: " << raincrease_snow << "\n"; + os << 
"\tcanopytemp_maxchange_perhour: " << canopytemp_maxchange_perhour << "\n"; + os << "\troughheat_to_roughmom_ratio: " << roughheat_to_roughmom_ratio << "\n"; + os << "\tcan_ch0: " << can_ch0 << "\n"; + os << "\tcan_rs_mult: " << can_rs_mult << "\n"; + os << "\trsmin: " << rsmin << "\n"; + os << "\tf3_gd: " << f3_gd << "\n"; + os << "\trootdepth: " << rootdepth << "\n"; + os << "_____________________________________" << "\n"; + os << "\twp_fraction: " << wp_fraction << "\n"; + os << "\th_wilt: " << h_wilt << "\n"; + os << "\tsnowfac: " << snowfac << "\n"; + os << "\trainfac: " << rainfac << "\n"; + os << "\tliquidfraction: " << liquidfraction << "\n"; + os << "\tsigftrunk: " << sigftrunk << "\n"; + os << "\tTtrunk: " << Ttrunk << "\n"; + os << "\tCondFluxCanop: " << CondFluxCanop << "\n"; + os << "\tCondFluxTrunks: " << CondFluxTrunks << "\n"; + os << "\tLWnet_Trunks: " << LWnet_Trunks << "\n"; + os << "_____________________________________" << "\n"; + os << "\tSWnet_Trunks: " << SWnet_Trunks << "\n"; + os << "\tQStrunks: " << QStrunks << "\n"; + os << "\tforestfloor_alb: " << forestfloor_alb << "\n"; + os << "\tBasalArea: " << BasalArea << "\n"; + os << "\tHMLeaves: " << HMLeaves << "\n"; + os << "\tHMTrunks: " << HMLeaves << "\n"; + + os << "\n"; return os.str(); } @@ -752,29 +1143,45 @@ void CanopyData::initializeSurfaceExchangeData() } // Class ElementData -ElementData::ElementData() : depositionDate(), L0(0.), L(0.), - Te(0.), gradT(0.), melting_tk(Constants::melting_tk), freezing_tk(Constants::freezing_tk), - theta((size_t)N_COMPONENTS), conc((size_t)N_COMPONENTS, SnowStation::number_of_solutes), k((size_t)N_SN_FIELDS), c((size_t)N_SN_FIELDS), soil((size_t)N_SOIL_FIELDS), +const unsigned short int ElementData::noID = static_cast(-1); +ElementData::ElementData(const unsigned short int& in_ID) : depositionDate(), L0(0.), L(0.), + Te(0.), gradT(0.), meltfreeze_tk(Constants::meltfreeze_tk), + theta((size_t)N_COMPONENTS), h(Constants::undefined), 
conc((size_t)N_COMPONENTS, SnowStation::number_of_solutes), k((size_t)N_SN_FIELDS), c((size_t)N_SN_FIELDS), soil((size_t)N_SOIL_FIELDS), Rho(0.), M(0.), sw_abs(0.), rg(0.), dd(0.), sp(0.), ogs(0.), rb(0.), N3(0.), mk(0), - type(0), metamo(0.), dth_w(0.), res_wat_cont(0.), Qmf(0.), QIntmf(0.), + type(0), metamo(0.), salinity(0.), dth_w(0.), res_wat_cont(0.), Qmf(0.), QIntmf(0.), dEps(0.), Eps(0.), Eps_e(0.), Eps_v(0.), Eps_Dot(0.), Eps_vDot(0.), E(0.), S(0.), C(0.), CDot(0.), ps2rb(0.), - s_strength(0.), hard(IOUtils::nodata), S_dr(0.), crit_cut_length(Constants::undefined), theta_r(0.), lwc_source(0.), dhf(0.) {} - -std::iostream& operator<<(std::iostream& os, const ElementData& data) + s_strength(0.), hard(0.), S_dr(0.), crit_cut_length(Constants::undefined), VG(*this), lwc_source(0.), PrefFlowArea(0.), + theta_w_transfer(0.), theta_i_reservoir(0.), theta_i_reservoir_cumul(0.), + SlopeParFlux(0.), Qph_up(0.), Qph_down(0.), dsm(0.), rime(0.), ID(in_ID), rhov(0.), Qmm(0.), vapTrans_fluxDiff(0.), vapTrans_snowDenChangeRate(0.), vapTrans_cumulativeDenChange(0.), vapTrans_underSaturationDegree(0.) 
{} + +ElementData::ElementData(const ElementData& cc) : + depositionDate(cc.depositionDate), L0(cc.L0), L(cc.L), + Te(cc.Te), gradT(cc.gradT), meltfreeze_tk(cc.meltfreeze_tk), + theta(cc.theta), h(cc.h), conc(cc.conc), k(cc.k), c(cc.c), soil(cc.soil), + Rho(cc.Rho), M(cc.M), sw_abs(cc.sw_abs), + rg(cc.rg), dd(cc.dd), sp(cc.sp), ogs(cc.ogs), rb(cc.rb), N3(cc.N3), mk(cc.mk), + type(cc.type), metamo(cc.metamo), salinity(cc.salinity), dth_w(cc.dth_w), res_wat_cont(cc.res_wat_cont), Qmf(cc.Qmf), QIntmf(cc.QIntmf), + dEps(cc.dEps), Eps(cc.Eps), Eps_e(cc.Eps_e), Eps_v(cc.Eps_v), Eps_Dot(cc.Eps_Dot), Eps_vDot(cc.Eps_vDot), E(cc.E), + S(cc.S), C(cc.C), CDot(cc.CDot), ps2rb(cc.ps2rb), + s_strength(cc.s_strength), hard(cc.hard), S_dr(cc.S_dr), crit_cut_length(cc.crit_cut_length), VG(*this), lwc_source(cc.lwc_source), PrefFlowArea(cc.PrefFlowArea), + theta_w_transfer(cc.theta_w_transfer), theta_i_reservoir(cc.theta_i_reservoir), theta_i_reservoir_cumul(cc.theta_i_reservoir_cumul), + SlopeParFlux(cc.SlopeParFlux), Qph_up(cc.Qph_up), Qph_down(cc.Qph_down), dsm(cc.dsm), rime(cc.rime), ID(cc.ID), rhov(cc.rhov), Qmm(cc.Qmm), vapTrans_fluxDiff(cc.vapTrans_fluxDiff), vapTrans_snowDenChangeRate(cc.vapTrans_snowDenChangeRate), vapTrans_cumulativeDenChange(cc.vapTrans_cumulativeDenChange), vapTrans_underSaturationDegree(cc.vapTrans_underSaturationDegree) {} + +std::ostream& operator<<(std::ostream& os, const ElementData& data) { os << data.depositionDate; os.write(reinterpret_cast(&data.L0), sizeof(data.L0)); os.write(reinterpret_cast(&data.L), sizeof(data.L)); os.write(reinterpret_cast(&data.Te), sizeof(data.Te)); os.write(reinterpret_cast(&data.gradT), sizeof(data.gradT)); - os.write(reinterpret_cast(&data.melting_tk), sizeof(data.melting_tk)); - os.write(reinterpret_cast(&data.freezing_tk), sizeof(data.freezing_tk)); + os.write(reinterpret_cast(&data.meltfreeze_tk), sizeof(data.meltfreeze_tk)); const size_t s_theta = data.theta.size(); os.write(reinterpret_cast(&s_theta), 
sizeof(size_t)); os.write(reinterpret_cast(&data.theta[0]), static_cast(s_theta*sizeof(data.theta[0]))); + os.write(reinterpret_cast(&data.h), sizeof(data.h)); os << data.conc; const size_t s_k = data.k.size(); @@ -789,6 +1196,10 @@ std::iostream& operator<<(std::iostream& os, const ElementData& data) os.write(reinterpret_cast(&s_soil), sizeof(size_t)); os.write(reinterpret_cast(&data.soil[0]), static_cast(s_soil*sizeof(data.soil[0]))); + os.write(reinterpret_cast(&data.theta_i_reservoir), sizeof(data.theta_i_reservoir)); + os.write(reinterpret_cast(&data.theta_i_reservoir_cumul), sizeof(data.theta_i_reservoir_cumul)); + os.write(reinterpret_cast(&data.theta_w_transfer), sizeof(data.theta_w_transfer)); + os.write(reinterpret_cast(&data.Rho), sizeof(data.Rho)); os.write(reinterpret_cast(&data.M), sizeof(data.M)); os.write(reinterpret_cast(&data.sw_abs), sizeof(data.sw_abs)); @@ -801,9 +1212,11 @@ std::iostream& operator<<(std::iostream& os, const ElementData& data) os.write(reinterpret_cast(&data.mk), sizeof(data.mk)); os.write(reinterpret_cast(&data.type), sizeof(data.type)); os.write(reinterpret_cast(&data.metamo), sizeof(data.metamo)); + os.write(reinterpret_cast(&data.salinity), sizeof(data.salinity)); os.write(reinterpret_cast(&data.dth_w), sizeof(data.dth_w)); os.write(reinterpret_cast(&data.res_wat_cont), sizeof(data.res_wat_cont)); os.write(reinterpret_cast(&data.Qmf), sizeof(data.Qmf)); + os.write(reinterpret_cast(&data.QIntmf), sizeof(data.QIntmf)); os.write(reinterpret_cast(&data.dEps), sizeof(data.dEps)); os.write(reinterpret_cast(&data.Eps), sizeof(data.Eps)); @@ -820,26 +1233,38 @@ std::iostream& operator<<(std::iostream& os, const ElementData& data) os.write(reinterpret_cast(&data.s_strength), sizeof(data.s_strength)); os.write(reinterpret_cast(&data.hard), sizeof(data.hard)); os.write(reinterpret_cast(&data.S_dr), sizeof(data.S_dr)); - os.write(reinterpret_cast(&data.theta_r), sizeof(data.theta_r)); + os.write(reinterpret_cast(&data.crit_cut_length), 
sizeof(data.crit_cut_length)); + os.write(reinterpret_cast(&data.VG), sizeof(data.VG)); os.write(reinterpret_cast(&data.lwc_source), sizeof(data.lwc_source)); - os.write(reinterpret_cast(&data.dhf), sizeof(data.dhf)); + os.write(reinterpret_cast(&data.PrefFlowArea), sizeof(data.PrefFlowArea)); + os.write(reinterpret_cast(&data.SlopeParFlux), sizeof(data.SlopeParFlux)); + os.write(reinterpret_cast(&data.Qph_up), sizeof(data.Qph_up)); + os.write(reinterpret_cast(&data.Qph_down), sizeof(data.Qph_down)); + os.write(reinterpret_cast(&data.dsm), sizeof(data.dsm)); + os.write(reinterpret_cast(&data.ID), sizeof(data.ID)); + os.write(reinterpret_cast(&data.rhov), sizeof(data.rhov)); + os.write(reinterpret_cast(&data.Qmm), sizeof(data.Qmm)); + os.write(reinterpret_cast(&data.vapTrans_fluxDiff), sizeof(data.vapTrans_fluxDiff)); + os.write(reinterpret_cast(&data.vapTrans_snowDenChangeRate), sizeof(data.vapTrans_snowDenChangeRate)); + os.write(reinterpret_cast(&data.vapTrans_cumulativeDenChange), sizeof(data.vapTrans_cumulativeDenChange)); + os.write(reinterpret_cast(&data.vapTrans_underSaturationDegree), sizeof(data.vapTrans_underSaturationDegree)); return os; } -std::iostream& operator>>(std::iostream& is, ElementData& data) +std::istream& operator>>(std::istream& is, ElementData& data) { is >> data.depositionDate; is.read(reinterpret_cast(&data.L0), sizeof(data.L0)); is.read(reinterpret_cast(&data.L), sizeof(data.L)); is.read(reinterpret_cast(&data.Te), sizeof(data.Te)); is.read(reinterpret_cast(&data.gradT), sizeof(data.gradT)); - is.read(reinterpret_cast(&data.melting_tk), sizeof(data.melting_tk)); - is.read(reinterpret_cast(&data.freezing_tk), sizeof(data.freezing_tk)); + is.read(reinterpret_cast(&data.meltfreeze_tk), sizeof(data.meltfreeze_tk)); size_t s_theta; is.read(reinterpret_cast(&s_theta), sizeof(size_t)); data.theta.resize(s_theta); is.read(reinterpret_cast(&data.theta[0]), static_cast(s_theta*sizeof(data.theta[0]))); + is.read(reinterpret_cast(&data.h), 
sizeof(data.h)); is >> data.conc; size_t s_k; @@ -857,6 +1282,10 @@ std::iostream& operator>>(std::iostream& is, ElementData& data) data.soil.resize(s_soil); is.read(reinterpret_cast(&data.soil[0]), static_cast(s_soil*sizeof(data.soil[0]))); + is.read(reinterpret_cast(&data.theta_i_reservoir), sizeof(data.theta_i_reservoir)); + is.read(reinterpret_cast(&data.theta_i_reservoir_cumul), sizeof(data.theta_i_reservoir_cumul)); + is.read(reinterpret_cast(&data.theta_w_transfer), sizeof(data.theta_w_transfer)); + is.read(reinterpret_cast(&data.Rho), sizeof(data.Rho)); is.read(reinterpret_cast(&data.M), sizeof(data.M)); is.read(reinterpret_cast(&data.sw_abs), sizeof(data.sw_abs)); @@ -869,9 +1298,11 @@ std::iostream& operator>>(std::iostream& is, ElementData& data) is.read(reinterpret_cast(&data.mk), sizeof(data.mk)); is.read(reinterpret_cast(&data.type), sizeof(data.type)); is.read(reinterpret_cast(&data.metamo), sizeof(data.metamo)); + is.read(reinterpret_cast(&data.salinity), sizeof(data.salinity)); is.read(reinterpret_cast(&data.dth_w), sizeof(data.dth_w)); is.read(reinterpret_cast(&data.res_wat_cont), sizeof(data.res_wat_cont)); is.read(reinterpret_cast(&data.Qmf), sizeof(data.Qmf)); + is.read(reinterpret_cast(&data.QIntmf), sizeof(data.QIntmf)); is.read(reinterpret_cast(&data.dEps), sizeof(data.dEps)); is.read(reinterpret_cast(&data.Eps), sizeof(data.Eps)); @@ -888,27 +1319,42 @@ std::iostream& operator>>(std::iostream& is, ElementData& data) is.read(reinterpret_cast(&data.s_strength), sizeof(data.s_strength)); is.read(reinterpret_cast(&data.hard), sizeof(data.hard)); is.read(reinterpret_cast(&data.S_dr), sizeof(data.S_dr)); - is.read(reinterpret_cast(&data.theta_r), sizeof(data.theta_r)); + is.read(reinterpret_cast(&data.crit_cut_length), sizeof(data.crit_cut_length)); + is.read(reinterpret_cast(&data.VG), sizeof(data.VG)); + data.VG.EMS = &data; is.read(reinterpret_cast(&data.lwc_source), sizeof(data.lwc_source)); - is.read(reinterpret_cast(&data.dhf), 
sizeof(data.dhf)); + is.read(reinterpret_cast(&data.PrefFlowArea), sizeof(data.PrefFlowArea)); + is.read(reinterpret_cast(&data.SlopeParFlux), sizeof(data.SlopeParFlux)); + is.read(reinterpret_cast(&data.Qph_up), sizeof(data.Qph_up)); + is.read(reinterpret_cast(&data.Qph_down), sizeof(data.Qph_down)); + is.read(reinterpret_cast(&data.dsm), sizeof(data.dsm)); + is.read(reinterpret_cast(&data.ID), sizeof(data.ID)); + is.read(reinterpret_cast(&data.rhov), sizeof(data.rhov)); + is.read(reinterpret_cast(&data.Qmm), sizeof(data.Qmm)); + is.read(reinterpret_cast(&data.vapTrans_fluxDiff), sizeof(data.vapTrans_fluxDiff)); + is.read(reinterpret_cast(&data.vapTrans_snowDenChangeRate), sizeof(data.vapTrans_snowDenChangeRate)); + is.read(reinterpret_cast(&data.vapTrans_cumulativeDenChange), sizeof(data.vapTrans_cumulativeDenChange)); + is.read(reinterpret_cast(&data.vapTrans_underSaturationDegree), sizeof(data.vapTrans_underSaturationDegree)); return is; } double ElementData::getYoungModule(const double& rho_slab, const Young_Modulus& model) { + if (rho_slab<=0.) 
throw mio::InvalidArgumentException("Evaluating Young's module on an element with negative density", AT); + switch (model) { case Sigrist: {//This is the parametrization by Sigrist, 2006 static const double A = 968.e6; //in Pa - const double E = A * pow( rho_slab/Constants::density_ice, 2.94 ); //in Pa - return E; + const double E_local = A * pow( rho_slab/Constants::density_ice, 2.94 ); //in Pa + return E_local; } case Pow: { - const double E = 5.07e9 * (pow((rho_slab/Constants::density_ice), 5.13)); - return E; + const double E_local = 5.07e9 * (pow((rho_slab/Constants::density_ice), 5.13)); + return E_local; } case Exp: { - const double E = 1.873e5 * exp(0.0149*(rho_slab)); - return E; + const double E_local = 1.873e5 * exp(0.0149*(rho_slab)); + return E_local; } default: throw mio::UnknownValueException("Selected Young's modulus model has not been implemented", AT); @@ -920,7 +1366,7 @@ double ElementData::getYoungModule(const double& rho_slab, const Young_Modulus& * @version 11.01 * @return sum of volumetric contents (1) */ -bool ElementData::checkVolContent() const +bool ElementData::checkVolContent() { bool ret = true; /*if(fabs(L*Rho - M) > 0.001) { @@ -932,7 +1378,7 @@ bool ElementData::checkVolContent() const for (unsigned int i = 0; i < N_COMPONENTS; i++) { sum += theta[i]; } - if (sum <= 0.99 || sum >= 1.01) { + if (sum < 1. - 2.*Constants::eps || sum > 1. + 2.*Constants::eps) { prn_msg(__FILE__, __LINE__, "wrn", Date(), "SUM of volumetric contents = %1.4f", sum); ret = false; } @@ -953,6 +1399,22 @@ bool ElementData::checkVolContent() const ret = false; } + // Take care of small rounding errors, in case large rounding errors do not exist. + if (ret == true) { + theta[ICE] = std::min(1., std::max(0., theta[ICE])); + theta[WATER] = std::min(1., std::max(0., theta[WATER])); + theta[WATER_PREF] = std::min(1., std::max(0., theta[WATER_PREF])); + theta[AIR] = (1. - theta[ICE] - theta[WATER] - theta[WATER_PREF] - theta[SOIL]); + if (theta[AIR] < 0.) 
{ + if (theta[ICE] > 1. - Constants::eps) { + theta[ICE] += theta[AIR]; + } else if (theta[WATER] > 1. - Constants::eps) { + theta[WATER] += theta[AIR]; + } + theta[AIR] = 0.; + } + } + return ret; } @@ -967,20 +1429,35 @@ void ElementData::heatCapacity() c_p = Constants::density_air * theta[AIR] * Constants::specific_heat_air; c_p += Constants::density_ice * theta[ICE] * Constants::specific_heat_ice; - c_p += Constants::density_water * theta[WATER] * Constants::specific_heat_water; + c_p += Constants::density_water * (theta[WATER] + theta[WATER_PREF]) * Constants::specific_heat_water; c_p += soil[SOIL_RHO] * theta[SOIL] * soil[SOIL_C]; c_p /= Rho; c[TEMPERATURE] = c_p; } /** - * @brief Computes cold content of an element, taking melting_tk as reference + * @brief Computes cold content of an element, taking meltfreeze_tk as reference * @version 10.08 * @return Cold content (J m-2) */ double ElementData::coldContent() const { - return (Rho * c[TEMPERATURE] * (Te - Constants::melting_tk) * L); + return (Rho * c[TEMPERATURE] * (Te - Constants::meltfreeze_tk) * L); +} + +/** + * @brief Updates element density + * @version 17.12 + */ +void ElementData::updDensity() +{ + const double brine_salinity = ((theta[WATER] + theta[WATER_PREF]) > 0.) ? (salinity / (theta[WATER] + theta[WATER_PREF])) : (0.); //salinity = bulk salinity + // Calculate element density + Rho = theta[ICE] * Constants::density_ice + + (theta[WATER] + theta[WATER_PREF]) * (Constants::density_water + brine_salinity * SeaIce::betaS) + + theta[SOIL] * soil[SOIL_RHO]; + M = Rho * L; + return; } /** @@ -1010,6 +1487,11 @@ double ElementData::extinction() const //return(Edata->Rho/7. + 75. - 0.0*Edata->theta[WATER]); } +void ElementData::snowResidualWaterContent() +{ + res_wat_cont = snowResidualWaterContent(theta[ICE]); +} + /** * @brief Estimate the residual water content RWC by Vol \n * From work by Coleou and Lesaffre, 1998, Ann. Glaciol., 26, 64-68. 
\n @@ -1019,13 +1501,9 @@ double ElementData::extinction() const * - RWC by Mass 0.049 to 0.029 * @note That function will limit range to 0.0264 to 0.08 RWC by Vol * @version 11.01 + * @param[in] theta_i ice volumetric fraction * @return residual water content of snow element (1) */ -void ElementData::snowResidualWaterContent() -{ - res_wat_cont = snowResidualWaterContent(theta[ICE]); -} - double ElementData::snowResidualWaterContent(const double& theta_i) { double resWatCont; @@ -1055,30 +1533,64 @@ double ElementData::snowResidualWaterContent(const double& theta_i) * (Richtwerte Baugrund), which once more proves that "nomen est omen". * If my name was "Schachtschabel", I would never ever be dealing with * wet soils and Baugrund. - * @author Michael Lehning + * For the USDA soil classes used if Richards equation is used, the field capacity is obtained using the Saxton formula (K.E. Saxton et al., 1986, Estimating generalized soil-water characteristics from texture. Soil Sci. Soc. Amer. J. 50(4):1031-1036) and defined in the VG class + * @author Michael Lehning & Adrien michel * @version 9Y.mm - * @return Soil field capacity (?) + * @return Soil field capacity (-) */ double ElementData::soilFieldCapacity() const { double fc; - if (!(rg > 0.)) { - fc = std::min(SnLaws::field_capacity_soil, (1. - theta[SOIL]) * 0.1); + if (VG.defined == true) + { + fc=VG.field_capacity; + } + else{ + if (!(rg > 0.)) { + fc = std::min(SnLaws::field_capacity_soil, (1. - theta[SOIL]) * 0.1); + } else { + //Follow implementation by Tobias Hipp master thesis. + //Note that the value of 0.0976114 is more precise and the value of 60.8057 is + //slightly different from what is mentioned in thesis, to make the function continuous over rg. + if(rg<17.0) { + fc = std::min(0.95, 0.32 / sqrt(rg) + 0.02); + } else { + if(rg<60.8057) { + fc=0.0976114-0.002*(rg-17.0); + } else { + fc=0.01; + } + } + } + } + return std::min(1. 
- theta[SOIL], fc); // Ensure that the field capacity does not exceed the pore space. +} + +/** + * @brief RelativeHumidity + * @author Nander Wever et al. + * @brief Relative humidity in soil. + * The formulation is based on Saito et al., 2006 "Numerical analysis of + * coupled water vapor and heat transport in the vadose zone". + * Calculated from the pressure head using a thermodynamic relationship + * between liquid water and water vapor in soil pores (Philip and de Vries, 1957) + * @author Margaux Couttet + * @param Edata element data + * @param Temperature temperature (K) + * @return Soil relative humidity (-) + */ + +double ElementData::RelativeHumidity() const +{ + if (VG.defined == true) { + return (std::max(0., std::min(1., exp(h * Constants::g / (Constants::gas_constant * Te))))); //see eq. [18] from Saito et al., 2006 } else { - //Follow implementation by Tobias Hipp master thesis. - //Note that the value of 0.0976114 is more precise and the value of 60.8057 is - //slightly different from what is mentioned in thesis, to make the function continuous over rg. - if(rg<17.0) { - fc = std::min(0.95, 0.32 / sqrt(rg) + 0.02); + if ((theta[WATER] + theta[WATER_PREF]) < soilFieldCapacity() ) { + return (0.5 * ( 1. - cos (std::min(Constants::pi, (theta[WATER] + theta[WATER_PREF]) * Constants::pi / (soilFieldCapacity() * 1.6))))); } else { - if(rg<60.8057) { - fc=0.0976114-0.002*(rg-17.0); - } else { - fc=0.01; - } + return 1.; } } - return std::min(1. - theta[SOIL], fc); // Ensure that the field capacity does not exceed the pore space. } /** @@ -1094,8 +1606,8 @@ double ElementData::snowElasticity() const return Constants::big; const double g = (Rho >= 70.)? ((Rho/1000.0)*8.235)-0.47 : ((70./1000.0)*8.235 )-0.47; - const double h = pow(10.0, g); - return (h * 100000.0); + const double he = pow(10.0, g); + return (he * 100000.0); } /** @@ -1149,6 +1661,11 @@ double ElementData::neck2VolumetricStrain() const return (Ln / (2. 
* rg + Ln)); } +void ElementData::snowType() +{ + type = snowType(dd, sp, 2.*rg, static_cast(mk%100), theta[WATER], res_wat_cont); +} + /** * @brief Determine the type of snow \n * First revisited by Fierz and Bellaire 2006 and 2007 @@ -1156,19 +1673,13 @@ double ElementData::neck2VolumetricStrain() const * @version 11.11 * @return snow type code according to old-fashioned Swiss tradition */ - -void ElementData::snowType() -{ - type = snowType(dd, sp, 2.*rg, static_cast(mk%100), theta[WATER], res_wat_cont); -} - unsigned short int ElementData::getSnowType() const { return snowType(dd, sp, 2.*rg, static_cast(mk%100), theta[WATER], res_wat_cont); } unsigned short int ElementData::snowType(const double& dendricity, const double& sphericity, - const double& grain_size, const unsigned short int& marker, const double& theta_w, const double& res_wat_cont) + const double& grain_size, const unsigned short int& marker, const double& theta_w, const double& res_wat_cont_loc) { int a=-1,b=-1,c=0; @@ -1290,7 +1801,7 @@ unsigned short int ElementData::snowType(const double& dendricity, const double& } // Now treat a couple of exceptions - note that the order is important if (b < 0) b = a; - if ((marker >= 20) && (theta_w < 0.1 * res_wat_cont)) { // MFcr Melt-Freeze + if ((marker >= 20) && (theta_w < 0.1 * res_wat_cont_loc)) { // MFcr Melt-Freeze c = 2; } switch (marker) { @@ -1300,9 +1811,17 @@ unsigned short int ElementData::snowType(const double& dendricity, const double& case 4: // PPgp Graupel a = 0; b = 0; c = 0; break; + case 6: // technical Snow + a = 0; b = 0; c= 6; + break; case 7: case 8: case 17: case 18: case 27: case 28: // Glacier ice & IFil, that is, ice layers within the snowpack a = 8; b = 8; c = 0; break; + case 9: // water layer + a = 0; b = 0; c = 0; + break; + default: // do nothing since we take care of exceptions here + break; } return static_cast(a*100 + b*10 + c); @@ -1310,7 +1829,7 @@ unsigned short int ElementData::snowType(const double& dendricity, 
const double& const std::string ElementData::toString() const { - std::stringstream os; + std::ostringstream os; os << "\t"; os << std::fixed << std::showpoint; os << depositionDate.toString(mio::Date::ISO) << "\n"; @@ -1326,12 +1845,12 @@ const std::string ElementData::toString() const os << "\tStrains: dEps=" << dEps << " Eps=" << Eps << " Eps_e=" << Eps_e << " Eps_v=" << Eps_v << "\n"; os << "\tYoung's modulus of elasticity=" << E << "\n"; os << "\tStrain rates Eps_Dot=" << Eps_Dot << " Eps_vDpt=" << Eps_vDot << " CDot=" << CDot << "\n"; - os << "\tStability: S_dr=" << S_dr << " hard=" << hard << " dhf=" << dhf << "\n"; + os << "\tStability: S_dr=" << S_dr << " hard=" << hard << " dsm=" << dsm << "\n"; os << "\n"; return os.str(); } -std::iostream& operator<<(std::iostream& os, const NodeData& data) +std::ostream& operator<<(std::ostream& os, const NodeData& data) { os.write(reinterpret_cast(&data.z), sizeof(data.z)); os.write(reinterpret_cast(&data.u), sizeof(data.u)); @@ -1343,13 +1862,15 @@ std::iostream& operator<<(std::iostream& os, const NodeData& data) os.write(reinterpret_cast(&data.ssi), sizeof(data.ssi)); os.write(reinterpret_cast(&data.hoar), sizeof(data.hoar)); - os.write(reinterpret_cast(&data.dhf), sizeof(data.dhf)); - os.write(reinterpret_cast(&data.S_dhf), sizeof(data.S_dhf)); - os.write(reinterpret_cast(&data.Sigdhf), sizeof(data.Sigdhf)); + os.write(reinterpret_cast(&data.dsm), sizeof(data.dsm)); + os.write(reinterpret_cast(&data.S_dsm), sizeof(data.S_dsm)); + os.write(reinterpret_cast(&data.Sigdsm), sizeof(data.Sigdsm)); + os.write(reinterpret_cast(&data.water_flux), sizeof(data.water_flux)); + os.write(reinterpret_cast(&data.rhov), sizeof(data.rhov)); return os; } -std::iostream& operator>>(std::iostream& is, NodeData& data) +std::istream& operator>>(std::istream& is, NodeData& data) { is.read(reinterpret_cast(&data.z), sizeof(data.z)); is.read(reinterpret_cast(&data.u), sizeof(data.u)); @@ -1361,45 +1882,63 @@ std::iostream& 
operator>>(std::iostream& is, NodeData& data) is.read(reinterpret_cast(&data.ssi), sizeof(data.ssi)); is.read(reinterpret_cast(&data.hoar), sizeof(data.hoar)); - is.read(reinterpret_cast(&data.dhf), sizeof(data.dhf)); - is.read(reinterpret_cast(&data.S_dhf), sizeof(data.S_dhf)); - is.read(reinterpret_cast(&data.Sigdhf), sizeof(data.Sigdhf)); + is.read(reinterpret_cast(&data.dsm), sizeof(data.dsm)); + is.read(reinterpret_cast(&data.S_dsm), sizeof(data.S_dsm)); + is.read(reinterpret_cast(&data.Sigdsm), sizeof(data.Sigdsm)); + is.read(reinterpret_cast(&data.water_flux), sizeof(data.water_flux)); + is.read(reinterpret_cast(&data.rhov), sizeof(data.rhov)); return is; } const std::string NodeData::toString() const { - std::stringstream os; + std::ostringstream os; os << std::fixed << std::showpoint; os << "\n"; os << "\tz=" << z << " T=" << T << " hoar=" << hoar << "\n"; os << "\tCreep: u=" << u << " udot=" << udot << " f=" << f << "\n"; os << "\tStability: S_n=" << S_n << " S_s=" << S_s << " ssi=" << ssi << "\n"; + os << "\tWater flux: S_n=" << water_flux << "\n"; + os << "\rNodal vapor density: rhov=" << rhov << "\n"; os << "\n"; return os.str(); } -SnowStation::SnowStation(const bool& i_useCanopyModel, const bool& i_useSoilLayers) : - meta(), cos_sl(1.), sector(0), Cdata(), pAlbedo(0.), Albedo(0.), - SoilAlb(0.), BareSoil_z0(0.), SoilNode(0), Ground(0.), - cH(0.), mH(0.), mass_sum(0.), swe(0.), lwc_sum(0.), hn(0.), rho_hn(0.), ErosionLevel(0), ErosionMass(0.), +SnowStation::SnowStation(const bool i_useCanopyModel, const bool i_useSoilLayers, const bool i_isAlpine3D, + const bool i_useSeaIceModule) : + meta(), cos_sl(1.), sector(0), Cdata(), Seaice(NULL), pAlbedo(0.), Albedo(0.), + SoilAlb(0.), SoilEmissivity(0.), BareSoil_z0(0.), SoilNode(0), Ground(0.), + cH(0.), mH(IOUtils::nodata), mass_sum(0.), swe(0.), lwc_sum(0.), lwc_sum_soil(0.), swc_sum_soil(0), hn(0.), rho_hn(0.), rime_hn(0.), ErosionLevel(0), ErosionMass(0.), S_class1(0), S_class2(0), S_d(0.), z_S_d(0.), 
S_n(0.), z_S_n(0.), S_s(0.), z_S_s(0.), S_4(0.), z_S_4(0.), S_5(0.), z_S_5(0.), - Ndata(), Edata(), Kt(NULL), tag_low(0), ColdContent(0.), ColdContentSoil(0.), dIntEnergy(0.), dIntEnergySoil(0.), meltFreezeEnergy(0.), meltFreezeEnergySoil(0.), + Ndata(), Edata(), Kt(NULL), ColdContent(0.), ColdContentSoil(0.), dIntEnergy(0.), dIntEnergySoil(0.), meltFreezeEnergy(0.), meltFreezeEnergySoil(0.), ReSolver_dt(-1), windward(false), WindScalingFactor(1.), TimeCountDeltaHS(0.), - nNodes(0), nElems(0), useCanopyModel(i_useCanopyModel), useSoilLayers(i_useSoilLayers) {} + nNodes(0), nElems(0), maxElementID(0), useCanopyModel(i_useCanopyModel), useSoilLayers(i_useSoilLayers), isAlpine3D(i_isAlpine3D) +{ + if (i_useSeaIceModule) + Seaice = new SeaIce; + else + Seaice = NULL; +} SnowStation::SnowStation(const SnowStation& c) : - meta(c.meta), cos_sl(c.cos_sl), sector(c.sector), Cdata(c.Cdata), pAlbedo(c.pAlbedo), Albedo(c.Albedo), - SoilAlb(c.SoilAlb),BareSoil_z0(c.BareSoil_z0), SoilNode(c.SoilNode), Ground(c.Ground), - cH(c.cH), mH(c.mH), mass_sum(c.mass_sum), swe(c.swe), lwc_sum(c.lwc_sum), hn(c.hn), rho_hn(c.rho_hn), ErosionLevel(c.ErosionLevel), ErosionMass(c.ErosionMass), + meta(c.meta), cos_sl(c.cos_sl), sector(c.sector), Cdata(c.Cdata), Seaice(c.Seaice), pAlbedo(c.pAlbedo), Albedo(c.Albedo), + SoilAlb(c.SoilAlb), SoilEmissivity(c.SoilEmissivity), BareSoil_z0(c.BareSoil_z0), SoilNode(c.SoilNode), Ground(c.Ground), + cH(c.cH), mH(c.mH), mass_sum(c.mass_sum), swe(c.swe), lwc_sum(c.lwc_sum), lwc_sum_soil(c.lwc_sum_soil), swc_sum_soil(c.swc_sum_soil), hn(c.hn), rho_hn(c.rho_hn), rime_hn(c.rime_hn), ErosionLevel(c.ErosionLevel), ErosionMass(c.ErosionMass), S_class1(c.S_class1), S_class2(c.S_class2), S_d(c.S_d), z_S_d(c.z_S_d), S_n(c.S_n), z_S_n(c.z_S_n), S_s(c.S_s), z_S_s(c.z_S_s), S_4(c.S_4), z_S_4(c.z_S_4), S_5(c.S_5), z_S_5(c.z_S_5), - Ndata(c.Ndata), Edata(c.Edata), Kt(NULL), tag_low(c.tag_low), ColdContent(c.ColdContent), ColdContentSoil(c.ColdContentSoil), 
dIntEnergy(c.dIntEnergy), dIntEnergySoil(c.dIntEnergySoil), meltFreezeEnergy(c.meltFreezeEnergy), meltFreezeEnergySoil(c.meltFreezeEnergySoil), + Ndata(c.Ndata), Edata(c.Edata), Kt(NULL), ColdContent(c.ColdContent), ColdContentSoil(c.ColdContentSoil), dIntEnergy(c.dIntEnergy), dIntEnergySoil(c.dIntEnergySoil), meltFreezeEnergy(c.meltFreezeEnergy), meltFreezeEnergySoil(c.meltFreezeEnergySoil), ReSolver_dt(-1), windward(c.windward), WindScalingFactor(c.WindScalingFactor), TimeCountDeltaHS(c.TimeCountDeltaHS), - nNodes(c.nNodes), nElems(c.nElems), useCanopyModel(c.useCanopyModel), useSoilLayers(c.useSoilLayers) {} + nNodes(c.nNodes), nElems(c.nElems), maxElementID(c.maxElementID), useCanopyModel(c.useCanopyModel), useSoilLayers(c.useSoilLayers), isAlpine3D(c.isAlpine3D) { + if (c.Seaice != NULL) { + // Deep copy pointer to sea ice object + Seaice = new SeaIce(*c.Seaice); + } else { + Seaice = NULL; + } +} SnowStation& SnowStation::operator=(const SnowStation& source) { if(this != &source) { @@ -1407,9 +1946,16 @@ SnowStation& SnowStation::operator=(const SnowStation& source) { cos_sl = source.cos_sl; sector = source.sector; Cdata = source.Cdata; + if (source.Seaice != NULL) { + // Deep copy pointer to sea ice object + Seaice = new SeaIce(*source.Seaice); + } else { + Seaice = NULL; + } pAlbedo = source.pAlbedo; Albedo = source.Albedo; SoilAlb = source.SoilAlb; + SoilEmissivity = source.SoilEmissivity; BareSoil_z0 = source.BareSoil_z0; SoilNode = source.SoilNode; Ground = source.Ground; @@ -1418,8 +1964,11 @@ SnowStation& SnowStation::operator=(const SnowStation& source) { mass_sum = source.mass_sum; swe = source.swe; lwc_sum = source.lwc_sum; + lwc_sum_soil = source.lwc_sum; + swc_sum_soil = source.swc_sum_soil; hn = source.hn; rho_hn = source.rho_hn; + rime_hn = source.rime_hn; ErosionLevel = source.ErosionLevel; ErosionMass = source.ErosionMass; S_class1 = source.S_class1; @@ -1436,22 +1985,23 @@ SnowStation& SnowStation::operator=(const SnowStation& source) { z_S_5 
= source.z_S_5; Ndata = source.Ndata; Edata = source.Edata; - Kt = source.Kt; - tag_low = source.tag_low; + Kt = NULL; ColdContent = source.ColdContent; ColdContentSoil = source.ColdContentSoil; dIntEnergy = source.dIntEnergy; dIntEnergySoil = source.dIntEnergySoil; meltFreezeEnergy = source.meltFreezeEnergy; meltFreezeEnergySoil = source.meltFreezeEnergySoil; + ReSolver_dt = source.ReSolver_dt; + windward = source.windward; + WindScalingFactor = source.WindScalingFactor; + TimeCountDeltaHS = source.TimeCountDeltaHS; nNodes = source.nNodes; nElems = source.nElems; + maxElementID = source.maxElementID; useCanopyModel = source.useCanopyModel; useSoilLayers = source.useSoilLayers; - windward = source.windward; - WindScalingFactor = source.WindScalingFactor; - TimeCountDeltaHS = source.TimeCountDeltaHS; - ReSolver_dt = source.ReSolver_dt; + isAlpine3D = source.isAlpine3D; } return *this; } @@ -1467,6 +2017,12 @@ SnowStation::~SnowStation() ReleaseBlockMatrix(&pMat->Mat.Block); } free(pMat); + pMat = NULL; + } + + if (Seaice != NULL) { + delete Seaice; + Seaice = NULL; } } @@ -1475,11 +2031,17 @@ SnowStation::~SnowStation() */ void SnowStation::compSnowpackMasses() { - mass_sum = swe = lwc_sum = 0.; + mass_sum = swe = lwc_sum = lwc_sum_soil = swc_sum_soil = 0.; for (size_t e = SoilNode; e < nElems; e++) { mass_sum += Edata[e].M; swe += Edata[e].L * Edata[e].Rho; - lwc_sum += Edata[e].L * (Edata[e].theta[WATER] * Constants::density_water); + lwc_sum += Edata[e].L * ((Edata[e].theta[WATER] + Edata[e].theta[WATER_PREF]) * Constants::density_water); + } + if(SoilNode > 0){ + for (size_t e = 0; e < SoilNode; e++) { + lwc_sum_soil += Edata[e].L * ((Edata[e].theta[WATER] + Edata[e].theta[WATER_PREF]) * Constants::density_water); + swc_sum_soil += Edata[e].L * (Edata[e].theta[ICE] * Constants::density_ice); + } } } @@ -1560,6 +2122,47 @@ double SnowStation::getModelledTemperature(const double& z) const } } +/** + * @brief Computes the total lateral flow in snow (kg / m2) + */ 
+double SnowStation::getTotalLateralFlowSnow() const +{ + // Case of no snow + if (getNumberOfElements() == SoilNode) return Constants::undefined; + + double tmp_sum = 0.; + for (size_t e=SoilNode; enEdata_old) { + for(size_t ii=nEdata_old; ii 0) { - const size_t rnE = nElems - nRemove; //Reduced number of elements - reduceNumberOfElements(rnE); - } -} - -void SnowStation::combineElements(const size_t& i_number_top_elements, const bool& reduce_n_elements, const size_t& cond) +void SnowStation::combineElements(const size_t& i_number_top_elements, const bool& reduce_n_elements, const size_t& cond, const double& comb_thresh_l) { if (nElems - SoilNode < i_number_top_elements+1) { return; @@ -1654,7 +2252,7 @@ void SnowStation::combineElements(const size_t& i_number_top_elements, const boo for (size_t eLower = SoilNode, eUpper = SoilNode+1; eLower < nElems-i_number_top_elements; eLower++, eUpper++) { switch (cond) { case 1: // merging WaterTransport - merge = (combineCondition(Edata[eLower], Edata[eUpper], cH-Ndata[eUpper].z, reduce_n_elements)); + merge = (combineCondition(Edata[eLower], Edata[eUpper], cH-Ndata[eUpper].z, reduce_n_elements, comb_thresh_l)); break; case 2: // aggregate first round merge = (Aggregate::joinSimilarLayers(Edata[eUpper], Edata[eLower])); @@ -1704,7 +2302,9 @@ void SnowStation::reduceNumberOfElements(const size_t& rnE) Ndata[eNew].S_s = Ndata[e+1].S_s; Ndata[eNew].S_n = Ndata[e+1].S_n; } else { // Removing elements for negative length L - dL += Edata[e].L; + // Under the condition of multiple element removals, Edata[e].L can occasionally represent a compounded element, + // such that it doesn't reflect the true height change. 
Better to use the nodal positions: + dL += (Ndata[e+1].z - Ndata[e].z); } } else { if (eNew < e) { @@ -1746,6 +2346,8 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec SoilAlb = SSdata.SoilAlb; BareSoil_z0 = SSdata.BareSoil_z0; + SoilEmissivity = (SSdata.Emissivity_soil == mio::IOUtils::nodata) ? Constants::emissivity_soil : SSdata.Emissivity_soil; + WindScalingFactor = SSdata.WindScalingFactor; TimeCountDeltaHS = SSdata.TimeCountDeltaHS; @@ -1753,7 +2355,7 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec cos_sl = cos(meta.getSlopeAngle()*mio::Cst::to_rad); sector = i_sector; - mH = cH = SSdata.Height; + cH = SSdata.Height; nNodes = SSdata.nN; nElems = SSdata.nN-1; @@ -1762,7 +2364,8 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec SoilNode = 0; Ground = 0.0; Ndata.front().z = 0.; - Ndata.front().T = (SSdata.nLayers > 0)? SSdata.Ldata.front().tl : Constants::melting_tk; + Ndata.front().T = (SSdata.nLayers > 0)? 
SSdata.Ldata.front().tl : Constants::meltfreeze_tk; + Ndata.front().rhov = Atmosphere::waterVaporDensity(Ndata.front().T, Atmosphere::vaporSaturationPressure(Ndata.front().T)); Ndata.front().u = 0.; Ndata.front().f = 0.; Ndata.front().udot = 0.; @@ -1772,7 +2375,7 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec bool real_soil_no_sandwich = true; // Switch to count real soil layers for (size_t ll = 0, n = 1; ll < SSdata.nLayers; ll++) { - // Update ground heigth and SoilNode number + // Update ground height and SoilNode number if (SSdata.Ldata[ll].phiSoil > 0.0 && real_soil_no_sandwich) { Ground += SSdata.Ldata[ll].hl; SoilNode += SSdata.Ldata[ll].ne; @@ -1785,11 +2388,12 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec for (size_t le = 0; le < SSdata.Ldata[ll].ne; le++, n++ ) { Ndata[n].z = Ndata[n-1].z + SSdata.Ldata[ll].hl / static_cast(SSdata.Ldata[ll].ne); Ndata[n].T = Ndata[n-1].T + dT; + Ndata[n].rhov = Atmosphere::waterVaporDensity(Ndata[n].T, Atmosphere::vaporSaturationPressure(Ndata[n].T)); Ndata[n].u = 0.; Ndata[n].f = 0.; Ndata[n].udot = 0.; - Ndata[n].S_n = INIT_STABILITY; // Static natural stability index - Ndata[n].S_s = INIT_STABILITY; // Alternative Stability Index (skier stability) + Ndata[n].S_n = IOUtils::nodata; // Static natural stability index + Ndata[n].S_s = IOUtils::nodata; // Alternative Stability Index (skier stability) } } @@ -1805,6 +2409,7 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec Edata[e].depositionDate = Date::rnd(SSdata.Ldata[ll].depositionDate, 1.); // Temperature data Edata[e].Te = (Ndata[e].T+Ndata[e+1].T) / 2.; + Edata[e].rhov = Atmosphere::waterVaporDensity(Edata[e].Te, Atmosphere::vaporSaturationPressure(Edata[e].Te)); Edata[e].L0 = Edata[e].L = (Ndata[e+1].z - Ndata[e].z); Edata[e].gradT = (Ndata[e+1].T-Ndata[e].T) / Edata[e].L; // Creep data @@ -1816,7 +2421,10 @@ void SnowStation::initialize(const 
SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec Edata[e].theta[SOIL] = SSdata.Ldata[ll].phiSoil; Edata[e].theta[AIR] = SSdata.Ldata[ll].phiVoids; Edata[e].theta[ICE] = SSdata.Ldata[ll].phiIce; + Edata[e].theta_i_reservoir = SSdata.Ldata[ll].phiIceReservoir; + Edata[e].theta_i_reservoir_cumul = SSdata.Ldata[ll].phiIceReservoirCumul; Edata[e].theta[WATER] = SSdata.Ldata[ll].phiWater; + Edata[e].theta[WATER_PREF] = SSdata.Ldata[ll].phiWaterPref; Edata[e].soil[SOIL_RHO] = SSdata.Ldata[ll].SoilRho; Edata[e].soil[SOIL_K] = SSdata.Ldata[ll].SoilK; Edata[e].soil[SOIL_C] = SSdata.Ldata[ll].SoilC; @@ -1826,8 +2434,7 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec Edata[e].conc[WATER][ii] = SSdata.Ldata[ll].cWater[ii]; Edata[e].conc[AIR][ii] = SSdata.Ldata[ll].cVoids[ii]; } - Edata[e].Rho = Edata[e].theta[ICE]*Constants::density_ice + - Edata[e].theta[WATER]*Constants::density_water + Edata[e].theta[SOIL]*Edata[e].soil[SOIL_RHO]; + Edata[e].updDensity(); assert(Edata[e].Rho >= 0. || Edata[e].Rho==IOUtils::nodata); //we want positive density // conductivities, specific heat and moisture content Edata[e].k[TEMPERATURE] = Edata[e].k[SEEPAGE] = Edata[e].k[SETTLEMENT] = 0.; @@ -1853,10 +2460,33 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec // Memories, memories Edata[e].CDot = SSdata.Ldata[ll].CDot; Edata[e].metamo = SSdata.Ldata[ll].metamo; - Edata[e].S_dr = INIT_STABILITY; + Edata[e].salinity = SSdata.Ldata[ll].salinity; + Edata[e].h = SSdata.Ldata[ll].h; + Edata[e].dsm = SSdata.Ldata[ll].dsm; + Edata[e].S_dr = IOUtils::nodata; Edata[e].hard = IOUtils::nodata; Edata[e].M = Edata[e].Rho * Edata[e].L0; assert(Edata[e].M >= (-Constants::eps2)); //mass must be positive + + // Check initial volumetric content of element + if ( !Edata[e].checkVolContent() ) { + prn_msg(__FILE__, __LINE__, "err", Date(), "wrong volumetric content upon initialization of layer %d. 
ice: %lf, water: %lf, water_pref: %lf, air: %lf, soil: %lf\n", + e, Edata[e].theta[ICE], Edata[e].theta[WATER], Edata[e].theta[WATER_PREF], Edata[e].theta[AIR], Edata[e].theta[SOIL]); + throw IOException("Snowpack Initialization failed", AT); + } + // Check if sufficient pore space is available when water would freeze. Only allow small corrections. + const double porespace = (1. - Edata[e].theta[ICE] - Edata[e].theta[SOIL]) * (Constants::density_ice / Constants::density_water); + if(Edata[e].theta[WATER] + Edata[e].theta[WATER_PREF] > porespace) { + if(Edata[e].theta[WATER] + Edata[e].theta[WATER_PREF] - porespace > 1.e-4) { + prn_msg(__FILE__, __LINE__, "err", Date(), "Insufficient pore space available in layer %d when water would refreeze and thereby expand. Available: %lf, required: %lf\n", + e, (1. - Edata[e].theta[ICE] - Edata[e].theta[SOIL]), (Edata[e].theta[WATER] + Edata[e].theta[WATER_PREF]) * (Constants::density_water / Constants::density_ice)); + throw IOException("Snowpack Initialization failed", AT); + } + const double tmp_sum = Edata[e].theta[WATER] + Edata[e].theta[WATER_PREF]; + Edata[e].theta[WATER] *= porespace / tmp_sum; + Edata[e].theta[WATER_PREF] *= porespace / tmp_sum; + } + Edata[e].theta[AIR] = (1. - Edata[e].theta[ICE] - Edata[e].theta[WATER] - Edata[e].theta[WATER_PREF] - Edata[e].theta[SOIL]); } // end of element layer for } // end of layer for @@ -1868,73 +2498,47 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec if (e < nElems-1) SigC -= (.5*Edata[e+1].M) * Constants::g * cos_sl; SigC -= (.5*Edata[e].M) * Constants::g * cos_sl; - + if(SigC == 0) { + SigC = -500; + } Edata[e].C = SigC; assert(Edata[e].C<0.); } + // Sea ice initializations + if (Seaice != NULL) { + Seaice->InitSeaIce(*this); + Seaice->updateFreeboard(*this); + for (size_t e = nElems; e -->0; ) { + const double br_sal = (Edata[e].theta[WATER] + Edata[e].theta[WATER_PREF] == 0.) ? (0.) 
: (Edata[e].salinity / (Edata[e].theta[WATER] + Edata[e].theta[WATER_PREF])); + if (Edata[e].salinity > 0.) { + Edata[e].meltfreeze_tk = Seaice->calculateMeltingTemperature(br_sal); + } + if (Edata[e].h == Constants::undefined) { + Edata[e].h = Seaice->SeaLevel - .5 * (Ndata[e].z + Ndata[e+1].z); + } else { + // Initialize + if (e >= SoilNode) { //Snow + Edata[e].VG.SetVGParamsSnow(vanGenuchten::YAMAGUCHI2012, vanGenuchten::CALONNE, /*matrix*/ true, /*seaice*/ true); + } else { //Soil + Edata[e].VG.SetVGParamsSoil(); + } + // If pressure head indicates full saturation, make sure no rounding errors exist from writing/reading sno files. + if (Edata[e].h >= Edata[e].VG.h_e) { + Edata[e].theta[WATER] = (1. - Edata[e].theta[ICE] - Edata[e].theta[SOIL]) * (Constants::density_ice / Constants::density_water) - Edata[e].theta[WATER_PREF]; + Edata[e].theta[AIR] = 1. - Edata[e].theta[ICE] - Edata[e].theta[WATER] - Edata[e].theta[WATER_PREF] - Edata[e].theta[SOIL]; + Edata[e].updDensity(); + } + } + } + } + + // Cold content and snowpack masses compSnowpackInternalEnergyChange(900.); // Time (900 s) will not matter as Qmf == 0. for all layers compSoilInternalEnergyChange(900.); // Time (900 s) will not matter as Qmf == 0. for all layers compSnowpackMasses(); - // INITIALIZE CANOPY DATA - //HACK: do this in Canopy! - Cdata.height = (SSdata.Canopy_Height > 0.0)?
SSdata.Canopy_Height : 0.; - Cdata.storage = 0.0; // intercepted water (kg m-2 or mm Water Equivalent) - Cdata.temp = 273.15; // temperature (K) - Cdata.Ttrunk = 273.15; // trunk temperature (K) - Cdata.QStrunks = 0.; - Cdata.SWnet_Trunks = 0.; - Cdata.LWnet_Trunks = 0.; - Cdata.CondFluxCanop = 0.; - Cdata.CondFluxTrunks = 0.; - Cdata.forestfloor_alb = 0.; - Cdata.snowfac=0.; - Cdata.rainfac=0.; - Cdata.liquidfraction=0.; - Cdata.canopyalb = Canopy::can_alb_dry; // albedo [-], which is a function of the dry canopy albedo and intercepted snow - Cdata.wetfraction = 0.0; - Cdata.intcapacity = 0.0; - Cdata.ra = 0.0; - Cdata.rc = 0.0; - Cdata.rs = 0.0; - Cdata.rstransp = 0.0; - Cdata.sigftrunk = 0.; - Cdata.HMLeaves=3.*4190.; //HACK what is this 4190? - Cdata.HMTrunks=30.*4190.; - if (useCanopyModel) { - Cdata.BasalArea = SSdata.Canopy_BasalArea; - Cdata.sigf = 1.-exp(-Canopy::krnt_lai * (Cdata.lai)); // 1-radiation transmissivity (-) - Cdata.ec = 1.0; //longwave emissivity - Cdata.lai = SSdata.Canopy_LAI; - Cdata.z0m = Cdata.height*0.1; - Cdata.z0h = Cdata.z0m*0.1; - Cdata.zdispl = Cdata.height*0.66; - Cdata.direct_throughfall = SSdata.Canopy_Direct_Throughfall; - if (SSdata.Canopy_Direct_Throughfall < 0. || SSdata.Canopy_Direct_Throughfall > 1.) { - prn_msg(__FILE__, __LINE__, "err", Date(), "Invalid Canopy Throughfall (%lf) given in sno file! 
It should be between 0 and 1.", SSdata.Canopy_Direct_Throughfall); - throw IOException("Snowpack Initialization failed", AT); - } - } else { - Cdata.BasalArea = 0.0; - Cdata.storage = 0.0; // intercepted water (kg m-2 or mm Water Equivalent) - Cdata.temp = 273.15; // temperature (K) - Cdata.canopyalb = Canopy::can_alb_dry; // albedo [-], which is a function of the dry canopy albedo and intercepted snow - Cdata.wetfraction = 0.0; - Cdata.intcapacity = 0.0; - Cdata.lai = 0.0; - Cdata.sigf = 1.0; // radiation transmissivity (-) - Cdata.ec = 1.0; //longwave emissivity - Cdata.z0m = 0.0; - Cdata.z0h = 0.0; - Cdata.zdispl = 0.0; - Cdata.direct_throughfall = 1.0; - Cdata.ra = 0.0; - Cdata.rc = 0.0; - Cdata.rs = 0.0; - Cdata.rstransp = 0.0; - } + Cdata.initialize(SSdata, useCanopyModel, isAlpine3D); // Set time step to -1, so we can determine the first time ReSolver1d is called. ReSolver_dt = -1.; @@ -1947,7 +2551,7 @@ void SnowStation::initialize(const SN_SNOWSOIL_DATA& SSdata, const size_t& i_sec * @param depth Distance of the element from the snow surface * @return Maximum element length. */ -double SnowStation::flexibleMaxElemLength(const double& depth) +double SnowStation::flexibleMaxElemLength(const double& depth, const double& comb_thresh_l) { static const double upper_limit_length=1.0; const double calc_length = static_cast( int( int(depth * 100.) / 10) + 1) * comb_thresh_l; @@ -1971,14 +2575,14 @@ double SnowStation::flexibleMaxElemLength(const double& depth) * @param reduce_n_elements Enable more "aggressive" combining for layers deeper in the snowpack, to reduce the number of elements and thus the computational load. 
* @return true if the two elements should be combined, false otherwise */ -bool SnowStation::combineCondition(const ElementData& Edata0, const ElementData& Edata1, const double& depth, const bool& reduce_n_elements) +bool SnowStation::combineCondition(const ElementData& Edata0, const ElementData& Edata1, const double& depth, const bool& reduce_n_elements, const double& comb_thresh_l) { // Default max_elem_l double max_elem_l = comb_thresh_l; // When aggressive combining is activated, override max_elem_l when necessary if (reduce_n_elements == true) { - max_elem_l = flexibleMaxElemLength(depth); + max_elem_l = flexibleMaxElemLength(depth, comb_thresh_l); } if ( (Edata0.L > max_elem_l) || (Edata1.L > max_elem_l) ) @@ -1993,7 +2597,7 @@ bool SnowStation::combineCondition(const ElementData& Edata0, const ElementData& if ( Edata0.theta[SOIL] > 0. || Edata1.theta[SOIL] > 0. ) return false; - if ( (Edata0.mk >= 100) || (Edata1.mk >= 100) ) + if ( (Edata0.mk >= 100 && int(Edata0.mk/1000)!=9) || (Edata1.mk >= 100 && int(Edata0.mk/1000)!=9) ) return false; if ( (Edata0.mk%100 == 3) || (Edata1.mk%100 == 3) ) @@ -2018,13 +2622,56 @@ bool SnowStation::combineCondition(const ElementData& Edata0, const ElementData& return true; } +/** + * @brief Split the element provided as first argument. + */ +void SnowStation::splitElement(const size_t& e) +{ + resize(nElems+1); + if(e!=nElems-2) { // If it is not the top node that needs splitting ... (Note that we have to reference nElems-2, as resize has been called (thus nElems increased) already.) 
+ // then shift all elements and nodes above upward + for(size_t ee = nElems-1; ee >= e+2; ee--) { + Edata[ee]=Edata[ee-1]; + Ndata[ee+1]=Ndata[ee]; + Ndata[ee]=Ndata[ee-1]; + } + } + // Fill info of new element + Edata[e+1]=Edata[e]; + // Halve the element + Edata[e].L*=0.5; + Edata[e].L0*=0.5; + Edata[e+1].L*=0.5; + Edata[e+1].L0*=0.5; + Edata[e].M*=0.5; + Edata[e+1].M*=0.5; + // Fill info of new node + Ndata[e+2]=Ndata[e+1]; + Ndata[e+1].hoar=0.; + Ndata[e+1].T=Edata[e].Te; + Ndata[e+1].rhov=Edata[e].rhov; + // Position the new node correctly in the domain + Ndata[e+1].z=(Ndata[e+2].z+Ndata[e].z)/2.; + Ndata[e+2].u*=0.5; + Ndata[e+1].u*=0.5; + // Remove "marked layer" mk from lower layer + if(int(Edata[e].mk/1000) == 9) Edata[e].mk-=static_cast(9000); + // Correct pressure head in case of saturation + if(Edata[e].h > Edata[e].VG.h_e) { + Edata[e].h+=.5*Edata[e].L; + Edata[e+1].h-=.5*Edata[e+1].L; + } +} + /** * @brief Split elements when they are near the top of the snowpack, when REDUCE_N_ELEMENTS is used. * - This function split elements when they are getting closer to the top of the snowpack. This is required * when using the "aggressive" merging option (REDUCE_N_ELEMENTS). When snow melt brings elements back to the * snow surface, smaller layer spacing is required to accurately describe temperature and moisture gradients. + @param max_element_length If positive: maximum allowed element length (m), above which splitting is applied. + If argument is not positive: use function flexibleMaxElemLength.
*/ -void SnowStation::splitElements() +void SnowStation::splitElements(const double& max_element_length, const double& comb_thresh_l) { //Return when no snow present if (nElems == SoilNode) return; @@ -2032,34 +2679,11 @@ void SnowStation::splitElements() for (size_t e = SoilNode; e < nElems; e++) { double max_elem_l = comb_thresh_l; const double depth = cH - Ndata[e].z; - max_elem_l = flexibleMaxElemLength(depth); + // If max_element_length > 0: take its value, else use function flexibleMaxElemLength. + max_elem_l = (max_element_length > 0) ? (0.5 * max_element_length) : (flexibleMaxElemLength(depth, comb_thresh_l)); if(0.5*(Edata[e].L) > max_elem_l) { - resize(nElems+1); - if(e!=nElems-1) { // If it is not the top node that needs splitting ... - // then shift all elements and nodes above upward - for(size_t ee = nElems-1; ee >= e+2; ee--) { - Edata[ee]=Edata[ee-1]; - Ndata[ee+1]=Ndata[ee]; - Ndata[ee]=Ndata[ee-1]; - } - } - // Fill info of new element - Edata[e+1]=Edata[e]; - // Half the element - Edata[e].L*=0.5; - Edata[e].L0*=0.5; - Edata[e+1].L*=0.5; - Edata[e+1].L0*=0.5; - Edata[e].M*=0.5; - Edata[e+1].M*=0.5; - // Fill info of new node - Ndata[e+2]=Ndata[e+1]; - Ndata[e+1].hoar=0.; - Ndata[e+1].T=Edata[e].Te; - // Position the new node correctly in the domain - Ndata[e+1].z=(Ndata[e+2].z+Ndata[e].z)/2.; - Ndata[e+2].u*=0.5; - Ndata[e+1].u*=0.5; + splitElement(e); + e--; // Make sure the same element gets checked again, in case 1 split is not sufficient } } } @@ -2087,6 +2711,8 @@ void SnowStation::mergeElements(ElementData& EdataLower, const ElementData& Edat const double L_lower = EdataLower.L; //Thickness of lower element const double L_upper = EdataUpper.L; //Thickness of upper element double LNew = L_lower; //Thickness of "new" element + double theta_air_lower=EdataLower.theta[AIR]; // the volume fraction of air for the lower element before any changes + double Rho_lower=EdataLower.Rho; // the density of the lower element before any changes if (merge) { 
// Determine new element length under the condition of keeping the density of the lower element constant, if the density of the lower element is larger than the upper element. @@ -2105,6 +2731,7 @@ void SnowStation::mergeElements(ElementData& EdataLower, const ElementData& Edat EdataLower.rb = ( EdataLower.theta[ICE]*L_lower*EdataLower.rb + EdataUpper.theta[ICE]*L_upper*EdataUpper.rb ) / (EdataLower.theta[ICE]*L_lower + EdataUpper.theta[ICE]*L_upper); EdataLower.CDot = ( EdataLower.theta[ICE]*L_lower*EdataLower.CDot + EdataUpper.theta[ICE]*L_upper*EdataUpper.CDot ) / (EdataLower.theta[ICE]*L_lower + EdataUpper.theta[ICE]*L_upper); } + EdataLower.h = ((EdataLower.h < EdataLower.VG.h_e && EdataUpper.h < EdataUpper.VG.h_e) || (EdataLower.h > EdataLower.VG.h_e && EdataUpper.h > EdataUpper.VG.h_e)) ? ((EdataLower.h * L_lower + EdataUpper.h * L_upper) / (LNew)) : (Constants::undefined); // Only calculate average when both pressure heads are positive or negative. EdataLower.opticalEquivalentGrainSize(); EdataLower.Eps = EdataLower.Eps_v; //HACK: why? 
EdataLower.Eps_e = 0.0; // TODO (very old) Check whether not simply add the elastic @@ -2116,23 +2743,30 @@ void SnowStation::mergeElements(ElementData& EdataLower, const ElementData& Edat EdataLower.L0 = EdataLower.L = LNew; EdataLower.M += EdataUpper.M; EdataLower.theta[ICE] = (L_upper*EdataUpper.theta[ICE] + L_lower*EdataLower.theta[ICE]) / LNew; + EdataLower.theta_i_reservoir = (L_upper*EdataUpper.theta_i_reservoir + L_lower*EdataLower.theta_i_reservoir) / LNew; // Also merge the ice reservoirs + EdataLower.theta_i_reservoir_cumul = (L_upper*EdataUpper.theta_i_reservoir_cumul + L_lower*EdataLower.theta_i_reservoir_cumul) / LNew; // Also merge the cumulated ice reservoirs EdataLower.theta[WATER] = (L_upper*EdataUpper.theta[WATER] + L_lower*EdataLower.theta[WATER]) / LNew; - EdataLower.theta[AIR] = 1.0 - EdataLower.theta[WATER] - EdataLower.theta[ICE] - EdataLower.theta[SOIL]; + EdataLower.theta[WATER_PREF] = (L_upper*EdataUpper.theta[WATER_PREF] + L_lower*EdataLower.theta[WATER_PREF]) / LNew; + EdataLower.theta[AIR] = 1.0 - EdataLower.theta[WATER] - EdataLower.theta[WATER_PREF] - EdataLower.theta[ICE] - EdataLower.theta[SOIL]; + EdataLower.salinity = (L_upper * EdataUpper.salinity + L_lower * EdataLower.salinity) / LNew; // For snow, check if there is enough space to store all ice if all water would freeze. This also takes care of cases where theta[AIR]<0. 
- if ((merge==false && topElement==true) && EdataLower.theta[SOIL]= 100) && (EdataLower.mk < 100)) { EdataLower.mk += static_cast( (EdataUpper.mk/100)*100 ); } + EdataLower.heatCapacity(); + + EdataLower.vapTrans_snowDenChangeRate = (EdataUpper.vapTrans_snowDenChangeRate*EdataUpper.Rho*L_upper + EdataLower.vapTrans_snowDenChangeRate*Rho_lower*L_lower)/(L_upper*EdataUpper.Rho+L_lower*Rho_lower); + EdataLower.vapTrans_cumulativeDenChange = (EdataUpper.vapTrans_cumulativeDenChange*EdataUpper.Rho*L_upper + EdataLower.vapTrans_cumulativeDenChange*Rho_lower*L_lower)/(L_upper*EdataUpper.Rho+L_lower*Rho_lower); + EdataLower.vapTrans_fluxDiff = (EdataUpper.vapTrans_fluxDiff*EdataUpper.Rho*L_upper + EdataLower.vapTrans_fluxDiff*Rho_lower*L_lower)/(L_upper*EdataUpper.Rho+L_lower*Rho_lower); + EdataLower.Qmm = (EdataUpper.Qmm*EdataUpper.Rho*L_upper + EdataLower.Qmm*Rho_lower*L_lower)/(L_upper*EdataUpper.Rho+L_lower*Rho_lower); + EdataLower.vapTrans_underSaturationDegree = (EdataUpper.vapTrans_underSaturationDegree*EdataUpper.Rho*L_upper + EdataLower.vapTrans_underSaturationDegree*Rho_lower*L_lower)/(L_upper*EdataUpper.Rho+L_lower*Rho_lower); + EdataLower.rhov = (EdataUpper.rhov*EdataUpper.theta[AIR]*L_upper + EdataLower.rhov*theta_air_lower*L_lower)/EdataLower.theta[AIR]; + EdataLower.Eps_vDot = (EdataUpper.Eps_vDot*EdataUpper.Rho*L_upper + EdataLower.Eps_vDot*Rho_lower*L_lower)/(L_upper*EdataUpper.Rho+L_lower*Rho_lower); } /** @@ -2174,8 +2817,8 @@ bool SnowStation::isGlacier(const bool& hydro) const static const size_t check_depth=5; const size_t top_index = nElems-1; const size_t top_index_toCheck = top_index - check_depth; - const size_t soil_index = SoilNode-1; - const size_t end_index = (top_index_toCheck>soil_index)? top_index_toCheck : soil_index; + const size_t soil_index = SoilNode; + const size_t end_index = (top_index_toCheck>=soil_index)? 
top_index_toCheck : soil_index; if (nElems==0 || top_index==soil_index) return false; //there are only soil layers or none @@ -2189,21 +2832,40 @@ bool SnowStation::isGlacier(const bool& hydro) const return is_pure_ice; } - } -std::iostream& operator<<(std::iostream& os, const SnowStation& data) +/** + * @brief returns the height of a marked reference layer inside the model domain + * Searches for the layer that is marked using (int(mk/1000)==9, e.g. 9000 or 9028) inside model domain + * This is for example used to interpret snow height measurements with an arbitrary reference level + * (i.e., not necessarily 0.) on a glacier, ice sheets or sea ice using the snow height driven mode. + * @return height of top node of marked reference layer + */ +double SnowStation::findMarkedReferenceLayer() const { - // HACK: nothing is done for the void* Kt + if(nElems == 0) { + return Constants::undefined; + } + for (size_t e = SoilNode; e < nElems; e++) { + if (int(Edata[e].mk/1000) == 9) { + return Ndata[e+1].z; + } + } + return Constants::undefined; +} +std::ostream& operator<<(std::ostream& os, const SnowStation& data) +{ os << data.meta; os.write(reinterpret_cast(&data.cos_sl), sizeof(data.cos_sl)); os.write(reinterpret_cast(&data.sector), sizeof(data.sector)); os << data.Cdata; + //os << data.Seaice; //HACK how to do this with a pointer? 
os.write(reinterpret_cast(&data.pAlbedo), sizeof(data.pAlbedo)); os.write(reinterpret_cast(&data.Albedo), sizeof(data.Albedo)); os.write(reinterpret_cast(&data.SoilAlb), sizeof(data.SoilAlb)); + os.write(reinterpret_cast(&data.SoilEmissivity), sizeof(data.SoilEmissivity)); os.write(reinterpret_cast(&data.BareSoil_z0), sizeof(data.BareSoil_z0)); os.write(reinterpret_cast(&data.SoilNode), sizeof(data.SoilNode)); os.write(reinterpret_cast(&data.Ground), sizeof(data.Ground)); @@ -2212,8 +2874,11 @@ std::iostream& operator<<(std::iostream& os, const SnowStation& data) os.write(reinterpret_cast(&data.mass_sum), sizeof(data.mass_sum)); os.write(reinterpret_cast(&data.swe), sizeof(data.swe)); os.write(reinterpret_cast(&data.lwc_sum), sizeof(data.lwc_sum)); + os.write(reinterpret_cast(&data.lwc_sum_soil), sizeof(data.lwc_sum_soil)); + os.write(reinterpret_cast(&data.swc_sum_soil), sizeof(data.swc_sum_soil)); os.write(reinterpret_cast(&data.hn), sizeof(data.hn)); os.write(reinterpret_cast(&data.rho_hn), sizeof(data.rho_hn)); + os.write(reinterpret_cast(&data.rime_hn), sizeof(data.rime_hn)); os.write(reinterpret_cast(&data.ErosionLevel), sizeof(data.ErosionLevel)); os.write(reinterpret_cast(&data.ErosionMass), sizeof(data.ErosionMass)); os.write(reinterpret_cast(&data.S_class1), sizeof(data.S_class1)); @@ -2238,7 +2903,6 @@ std::iostream& operator<<(std::iostream& os, const SnowStation& data) for (size_t ii=0; ii(&data.tag_low), sizeof(data.tag_low)); os.write(reinterpret_cast(&data.ColdContent), sizeof(data.ColdContent)); os.write(reinterpret_cast(&data.ColdContentSoil), sizeof(data.ColdContentSoil)); os.write(reinterpret_cast(&data.dIntEnergy), sizeof(data.dIntEnergy)); @@ -2259,18 +2923,19 @@ std::iostream& operator<<(std::iostream& os, const SnowStation& data) os.write(reinterpret_cast(&data.comb_thresh_rg), sizeof(data.comb_thresh_rg)); os.write(reinterpret_cast(&data.thresh_moist_snow), sizeof(data.thresh_moist_snow)); os.write(reinterpret_cast(&data.thresh_moist_soil), 
sizeof(data.thresh_moist_soil)); - os.write(reinterpret_cast(&data.number_top_elements), sizeof(data.number_top_elements));*/ - os.write(reinterpret_cast(&data.number_of_solutes), sizeof(data.number_of_solutes)); + os.write(reinterpret_cast(&data.number_top_elements), sizeof(data.number_top_elements)); + os.write(reinterpret_cast(&data.number_of_solutes), sizeof(data.number_of_solutes));*/ // private member variables: os.write(reinterpret_cast(&data.nNodes), sizeof(data.nNodes)); os.write(reinterpret_cast(&data.nElems), sizeof(data.nElems)); + os.write(reinterpret_cast(&data.maxElementID), sizeof(data.maxElementID)); os.write(reinterpret_cast(&data.useCanopyModel), sizeof(data.useCanopyModel)); os.write(reinterpret_cast(&data.useSoilLayers), sizeof(data.useSoilLayers)); return os; } -std::iostream& operator>>(std::iostream& is, SnowStation& data) +std::istream& operator>>(std::istream& is, SnowStation& data) { // HACK: nothing is done for the void* Kt @@ -2279,9 +2944,11 @@ std::iostream& operator>>(std::iostream& is, SnowStation& data) is.read(reinterpret_cast(&data.sector), sizeof(data.sector)); is >> data.Cdata; + //is >> data.Seaice; //HACK how to do this with a pointer? 
is.read(reinterpret_cast(&data.pAlbedo), sizeof(data.pAlbedo)); is.read(reinterpret_cast(&data.Albedo), sizeof(data.Albedo)); is.read(reinterpret_cast(&data.SoilAlb), sizeof(data.SoilAlb)); + is.read(reinterpret_cast(&data.SoilEmissivity), sizeof(data.SoilEmissivity)); is.read(reinterpret_cast(&data.BareSoil_z0), sizeof(data.BareSoil_z0)); is.read(reinterpret_cast(&data.SoilNode), sizeof(data.SoilNode)); is.read(reinterpret_cast(&data.Ground), sizeof(data.Ground)); @@ -2290,8 +2957,11 @@ std::iostream& operator>>(std::iostream& is, SnowStation& data) is.read(reinterpret_cast(&data.mass_sum), sizeof(data.mass_sum)); is.read(reinterpret_cast(&data.swe), sizeof(data.swe)); is.read(reinterpret_cast(&data.lwc_sum), sizeof(data.lwc_sum)); + is.read(reinterpret_cast(&data.lwc_sum_soil), sizeof(data.lwc_sum_soil)); + is.read(reinterpret_cast(&data.swc_sum_soil), sizeof(data.swc_sum_soil)); is.read(reinterpret_cast(&data.hn), sizeof(data.hn)); is.read(reinterpret_cast(&data.rho_hn), sizeof(data.rho_hn)); + is.read(reinterpret_cast(&data.rime_hn), sizeof(data.rime_hn)); is.read(reinterpret_cast(&data.ErosionLevel), sizeof(data.ErosionLevel)); is.read(reinterpret_cast(&data.ErosionMass), sizeof(data.ErosionMass)); is.read(reinterpret_cast(&data.S_class1), sizeof(data.S_class1)); @@ -2314,12 +2984,11 @@ std::iostream& operator>>(std::iostream& is, SnowStation& data) size_t s_Edata; is.read(reinterpret_cast(&s_Edata), sizeof(size_t)); - data.Edata.resize(s_Edata); + data.Edata.resize( s_Edata, ElementData(ElementData::noID) ); for (size_t ii=0; ii> data.Edata[ii]; data.Kt = NULL; - is.read(reinterpret_cast(&data.tag_low), sizeof(data.tag_low)); is.read(reinterpret_cast(&data.ColdContent), sizeof(data.ColdContent)); is.read(reinterpret_cast(&data.ColdContentSoil), sizeof(data.ColdContentSoil)); is.read(reinterpret_cast(&data.dIntEnergy), sizeof(data.dIntEnergy)); @@ -2340,12 +3009,13 @@ std::iostream& operator>>(std::iostream& is, SnowStation& data) 
is.read(reinterpret_cast(&data.comb_thresh_rg), sizeof(data.comb_thresh_rg)); is.read(reinterpret_cast(&data.thresh_moist_snow), sizeof(data.thresh_moist_snow)); is.read(reinterpret_cast(&data.thresh_moist_soil), sizeof(data.thresh_moist_soil)); - is.read(reinterpret_cast(&data.number_top_elements), sizeof(data.number_top_elements));*/ - is.read(reinterpret_cast(&data.number_of_solutes), sizeof(data.number_of_solutes)); + is.read(reinterpret_cast(&data.number_top_elements), sizeof(data.number_top_elements)); + is.read(reinterpret_cast(&data.number_of_solutes), sizeof(data.number_of_solutes));*/ // private member variables: is.read(reinterpret_cast(&data.nNodes), sizeof(data.nNodes)); is.read(reinterpret_cast(&data.nElems), sizeof(data.nElems)); + is.read(reinterpret_cast(&data.maxElementID), sizeof(data.maxElementID)); is.read(reinterpret_cast(&data.useCanopyModel), sizeof(data.useCanopyModel)); is.read(reinterpret_cast(&data.useSoilLayers), sizeof(data.useSoilLayers)); return is; @@ -2353,12 +3023,12 @@ std::iostream& operator>>(std::iostream& is, SnowStation& data) const std::string SnowStation::toString() const { - std::stringstream os; + std::ostringstream os; os << "" << "\n"; os << meta.toString(); os << setprecision(4); //os << fixed; -// os << nElems << " element(s) and " << nNodes << " node(s)."; + //os << nElems << " element(s) and " << nNodes << " node(s)."; if(useSoilLayers) os << " Soil=true"; else @@ -2369,8 +3039,8 @@ const std::string SnowStation::toString() const os << " canopy=false"; os << "\n"; - os << "Soil:\tSoilNode=" << SoilNode << " depth=" << Ground << " BareSoil_z0=" << BareSoil_z0 << " SoilAlb=" << SoilAlb << "\n"; - os << "Snow:\tMeasured HS=" << mH << " Calculated HS=" << cH << " SWE=" << swe << " LWCtot" << lwc_sum << " New snow=" << hn << " of density=" << rho_hn << "\n"; + os << "Soil:\tSoilNode=" << SoilNode << " depth=" << Ground << " BareSoil_z0=" << BareSoil_z0 << " SoilAlb=" << SoilAlb << " SoilEmissivity=" << SoilEmissivity << 
"\n"; + os << "Snow:\tMeasured HS=" << mH << " Calculated HS=" << cH << " SWE=" << swe << " LWCtot" << lwc_sum << " LWCtotSoil" << lwc_sum_soil << " SWCtotSoil" << swc_sum_soil << " New snow=" << hn << " of density=" << rho_hn << "\n"; os << "Snow Albedo:\tAlbedo=" << Albedo << " parametrized Albedo=" << pAlbedo << "\n"; os << "Energy:\tColdContent=" << ColdContent << " dIntEnergy=" << dIntEnergy; os << "Snowdrift:\tsector=" << sector << " windward=" << windward << " ErosionLevel=" << ErosionLevel << " ErosionMass=" << ErosionMass << "\n"; @@ -2398,9 +3068,9 @@ const std::string SnowStation::toString() const CurrentMeteo::CurrentMeteo() : date(), ta(0.), rh(0.), rh_avg(0.), vw(0.), vw_avg(0.), vw_max(0.), dw(0.), vw_drift(0.), dw_drift(0.), ustar(0.), z0(0.), psi_s(0.), - iswr(0.), rswr(0.), mAlbedo(0.), diff(0.), dir_h(0.), elev(0.), ea(0.), tss(0.), tss_a12h(0.), tss_a24h(0.), ts0(0.), - psum(0.), psum_ph(IOUtils::nodata), hs(0.), hs_a3h(0.), hs_rate(0.), adv_heat(IOUtils::nodata), - ts(), zv_ts(), conc(SnowStation::number_of_solutes, 0.), rho_hn(0.), + iswr(0.), rswr(0.), mAlbedo(0.), diff(0.), dir_h(0.), elev(0.), ea(0.), lw_net(IOUtils::nodata), tss(0.), tss_a12h(0.), tss_a24h(0.), ts0(0.), + psum(0.), psum_ph(IOUtils::nodata), psum_tech(IOUtils::nodata), hs(0.), hs_a3h(0.), hs_rate(0.), geo_heat(IOUtils::nodata), adv_heat(IOUtils::nodata), + ts(), zv_ts(), conc(SnowStation::number_of_solutes, 0.), rho_hn(0.), rime_hn(0.), lwc_hn(0.), poor_ea(false), fixedPositions(), minDepthSubsurf(), maxNumberMeasTemperatures(), numberMeasTemperatures(mio::IOUtils::unodata), numberFixedRates() {} @@ -2409,14 +3079,15 @@ CurrentMeteo::CurrentMeteo() CurrentMeteo::CurrentMeteo(const SnowpackConfig& cfg) : date(), ta(0.), rh(0.), rh_avg(0.), vw(0.), vw_avg(0.), vw_max(0.), dw(0.), vw_drift(0.), dw_drift(0.), ustar(0.), z0(0.), psi_s(0.), - iswr(0.), rswr(0.), mAlbedo(0.), diff(0.), dir_h(0.), elev(0.), ea(0.), tss(0.), tss_a12h(0.), tss_a24h(0.), ts0(0.), - psum(0.), 
psum_ph(IOUtils::nodata), hs(0.), hs_a3h(0.), hs_rate(0.), adv_heat(IOUtils::nodata), - ts(), zv_ts(), conc(SnowStation::number_of_solutes, 0.), rho_hn(0.), + iswr(0.), rswr(0.), mAlbedo(0.), diff(0.), dir_h(0.), elev(0.), ea(0.), lw_net(IOUtils::nodata), tss(0.), tss_a12h(0.), tss_a24h(0.), ts0(0.), + psum(0.), psum_ph(IOUtils::nodata), psum_tech(IOUtils::nodata), hs(0.), hs_a3h(0.), hs_rate(0.), geo_heat(IOUtils::nodata), adv_heat(IOUtils::nodata), + ts(), zv_ts(), conc(SnowStation::number_of_solutes, 0.), rho_hn(0.), rime_hn(0.), lwc_hn(0.), poor_ea(false), fixedPositions(), minDepthSubsurf(), maxNumberMeasTemperatures(), numberMeasTemperatures(mio::IOUtils::unodata), numberFixedRates() { maxNumberMeasTemperatures = cfg.get("MAX_NUMBER_MEAS_TEMPERATURES", "SnowpackAdvanced"); - fixedPositions = std::vector();//cfg.get("FIXED_POSITIONS", "SnowpackAdvanced")); + fixedPositions = std::vector(); + //cfg.getValue("FIXED_POSITIONS", "SnowpackAdvanced", fixedPositions); minDepthSubsurf = cfg.get("MIN_DEPTH_SUBSURF", "SnowpackAdvanced"); numberFixedRates = cfg.get("NUMBER_FIXED_RATES", "SnowpackAdvanced"); } @@ -2433,7 +3104,7 @@ void CurrentMeteo::reset(const SnowpackConfig& i_cfg) * label the columns: TS1, TS2, TS3, etc. * - User defined positions (m) should be provided in the advanced section, for example, * FIXED_POSITIONS = "0.25 0.50 -0.10": -* - positive values refer to heigths measured from the ground surface (snow only) +* - positive values refer to heights measured from the ground surface (snow only) * - negative values refer to depths measured from either the ground surface or the snow surface in case no soil * layers are present * - There may be be more FIXED_POSITIONS than measured temperatures. 
In that case, the first positions are @@ -2550,7 +3221,7 @@ void CurrentMeteo::copySolutes(const mio::MeteoData& md, const size_t& i_number_ } } -std::iostream& operator<<(std::iostream& os, const CurrentMeteo& data) +std::ostream& operator<<(std::ostream& os, const CurrentMeteo& data) { os << data.date; os.write(reinterpret_cast(&data.ta), sizeof(data.ta)); @@ -2572,34 +3243,40 @@ std::iostream& operator<<(std::iostream& os, const CurrentMeteo& data) os.write(reinterpret_cast(&data.dir_h), sizeof(data.dir_h)); os.write(reinterpret_cast(&data.elev), sizeof(data.elev)); os.write(reinterpret_cast(&data.ea), sizeof(data.ea)); + os.write(reinterpret_cast(&data.lw_net), sizeof(data.lw_net)); os.write(reinterpret_cast(&data.tss), sizeof(data.tss)); os.write(reinterpret_cast(&data.tss_a12h), sizeof(data.tss_a12h)); os.write(reinterpret_cast(&data.tss_a24h), sizeof(data.tss_a24h)); os.write(reinterpret_cast(&data.ts0), sizeof(data.ts0)); os.write(reinterpret_cast(&data.psum), sizeof(data.psum)); os.write(reinterpret_cast(&data.psum_ph), sizeof(data.psum_ph)); + os.write(reinterpret_cast(&data.psum_tech), sizeof(data.psum_tech)); os.write(reinterpret_cast(&data.hs), sizeof(data.hs)); os.write(reinterpret_cast(&data.hs_a3h), sizeof(data.hs_a3h)); os.write(reinterpret_cast(&data.hs_rate), sizeof(data.hs_rate)); + os.write(reinterpret_cast(&data.geo_heat), sizeof(data.geo_heat)); os.write(reinterpret_cast(&data.adv_heat), sizeof(data.adv_heat)); const size_t s_ts = data.ts.size(); os.write(reinterpret_cast(&s_ts), sizeof(size_t)); -// for (size_t ii=0; ii(&s_zv_ts), sizeof(size_t)); -// for (size_t ii=0; ii(&s_conc), sizeof(size_t)); -// for (size_t ii=0; ii(&data.rho_hn), sizeof(data.rho_hn)); + os.write(reinterpret_cast(&data.rime_hn), sizeof(data.rime_hn)); + os.write(reinterpret_cast(&data.lwc_hn), sizeof(data.lwc_hn)); + os.write(reinterpret_cast(&data.poor_ea), sizeof(data.poor_ea)); const size_t s_fixedPositions = data.fixedPositions.size(); 
os.write(reinterpret_cast(&s_fixedPositions), sizeof(size_t)); -// for (size_t ii=0; ii(&data.minDepthSubsurf), sizeof(data.minDepthSubsurf)); os.write(reinterpret_cast(&data.maxNumberMeasTemperatures), sizeof(data.maxNumberMeasTemperatures)); @@ -2608,7 +3285,7 @@ std::iostream& operator<<(std::iostream& os, const CurrentMeteo& data) return os; } -std::iostream& operator>>(std::iostream& is, CurrentMeteo& data) +std::istream& operator>>(std::istream& is, CurrentMeteo& data) { is >> data.date; is.read(reinterpret_cast(&data.ta), sizeof(data.ta)); @@ -2630,15 +3307,18 @@ std::iostream& operator>>(std::iostream& is, CurrentMeteo& data) is.read(reinterpret_cast(&data.dir_h), sizeof(data.dir_h)); is.read(reinterpret_cast(&data.elev), sizeof(data.elev)); is.read(reinterpret_cast(&data.ea), sizeof(data.ea)); + is.read(reinterpret_cast(&data.lw_net), sizeof(data.lw_net)); is.read(reinterpret_cast(&data.tss), sizeof(data.tss)); is.read(reinterpret_cast(&data.tss_a12h), sizeof(data.tss_a12h)); is.read(reinterpret_cast(&data.tss_a24h), sizeof(data.tss_a24h)); is.read(reinterpret_cast(&data.ts0), sizeof(data.ts0)); is.read(reinterpret_cast(&data.psum), sizeof(data.psum)); is.read(reinterpret_cast(&data.psum_ph), sizeof(data.psum_ph)); + is.read(reinterpret_cast(&data.psum_tech), sizeof(data.psum_tech)); is.read(reinterpret_cast(&data.hs), sizeof(data.hs)); is.read(reinterpret_cast(&data.hs_a3h), sizeof(data.hs_a3h)); is.read(reinterpret_cast(&data.hs_rate), sizeof(data.hs_rate)); + is.read(reinterpret_cast(&data.geo_heat), sizeof(data.geo_heat)); is.read(reinterpret_cast(&data.adv_heat), sizeof(data.adv_heat)); size_t s_ts; @@ -2657,6 +3337,9 @@ std::iostream& operator>>(std::iostream& is, CurrentMeteo& data) for (size_t ii=0; ii> data.conc[ii]; is.read(reinterpret_cast(&data.rho_hn), sizeof(data.rho_hn)); + is.read(reinterpret_cast(&data.rime_hn), sizeof(data.rime_hn)); + is.read(reinterpret_cast(&data.lwc_hn), sizeof(data.lwc_hn)); + is.read(reinterpret_cast(&data.poor_ea), 
sizeof(data.poor_ea)); size_t s_fixedPositions; is.read(reinterpret_cast(&s_fixedPositions), sizeof(size_t)); @@ -2672,7 +3355,7 @@ std::iostream& operator>>(std::iostream& is, CurrentMeteo& data) const std::string CurrentMeteo::toString() const { - std::stringstream os; + std::ostringstream os; const double to_deg = 180. / mio::Cst::PI; os << "" << "\n"; os << date.toString(Date::ISO) << "\n"; @@ -2681,11 +3364,11 @@ const std::string CurrentMeteo::toString() const os << setw(8) << "RH=" << rh << " rh_avg=" << rh_avg << "\n"; os << setw(8) << "ISWR=" << iswr << " RSWR=" << rswr << " mAlbedo=" << mAlbedo << "\n"; os << setw(8) << "diff=" << diff << " dir_h=" << dir_h << " Sun_elev=" << elev*to_deg << "° EA=" << ea << "\n"; - os << setw(8) << "PSUM=" << psum << " PSUM_PH=" << psum_ph << " HS=" << hs << " rho_hn=" << rho_hn << "\n"; + os << setw(8) << "PSUM=" << psum << " PSUM_PH=" << psum_ph << " HS=" << hs << " rho_hn=" << rho_hn << " PSUM_TECH=" << psum_tech << "\n"; os << setw(8) << "VW=" << vw << " vw_avg=" << vw_avg << " vw_max=" << vw_max << " vw_drift=" << vw_drift << "\n"; os << setw(8) << "DW=" << dw << "\n"; os << setw(8) << "U*=" << ustar << " z0=" << z0 << " psi_s=" << psi_s << "\n"; - + os << setw(8) << "RIME_HN=" << rime_hn; //os << std::setprecision(10); if(!ts.empty()) os << " "; for (unsigned int ii=0; ii(&data.Canopy_LAI), sizeof(data.Canopy_LAI)); os.write(reinterpret_cast(&data.Canopy_BasalArea), sizeof(data.Canopy_BasalArea)); os.write(reinterpret_cast(&data.Canopy_Direct_Throughfall), sizeof(data.Canopy_Direct_Throughfall)); + os.write(reinterpret_cast(&data.Canopy_diameter), sizeof(data.Canopy_diameter)); + os.write(reinterpret_cast(&data.Canopy_lai_frac_top_default), sizeof(data.Canopy_lai_frac_top_default)); + os.write(reinterpret_cast(&data.Canopy_int_cap_snow), sizeof(data.Canopy_int_cap_snow)); + os.write(reinterpret_cast(&data.Canopy_alb_dry), sizeof(data.Canopy_alb_dry)); + os.write(reinterpret_cast(&data.Canopy_alb_wet), 
sizeof(data.Canopy_alb_wet)); + os.write(reinterpret_cast(&data.Canopy_alb_snow), sizeof(data.Canopy_alb_snow)); + os.write(reinterpret_cast(&data.Emissivity_soil), sizeof(data.Emissivity_soil)); os.write(reinterpret_cast(&data.WindScalingFactor), sizeof(data.WindScalingFactor)); os.write(reinterpret_cast(&data.ErosionLevel), sizeof(data.ErosionLevel)); os.write(reinterpret_cast(&data.TimeCountDeltaHS), sizeof(data.TimeCountDeltaHS)); return os; } -std::iostream& operator>>(std::iostream& is, SN_SNOWSOIL_DATA& data) +std::istream& operator>>(std::istream& is, SN_SNOWSOIL_DATA& data) { is >> data.meta; is >> data.profileDate; @@ -2749,36 +3439,50 @@ std::iostream& operator>>(std::iostream& is, SN_SNOWSOIL_DATA& data) is.read(reinterpret_cast(&data.Canopy_LAI), sizeof(data.Canopy_LAI)); is.read(reinterpret_cast(&data.Canopy_BasalArea), sizeof(data.Canopy_BasalArea)); is.read(reinterpret_cast(&data.Canopy_Direct_Throughfall), sizeof(data.Canopy_Direct_Throughfall)); + is.read(reinterpret_cast(&data.Canopy_diameter), sizeof(data.Canopy_diameter)); + is.read(reinterpret_cast(&data.Canopy_lai_frac_top_default), sizeof(data.Canopy_lai_frac_top_default)); + is.read(reinterpret_cast(&data.Canopy_int_cap_snow), sizeof(data.Canopy_int_cap_snow)); + is.read(reinterpret_cast(&data.Canopy_alb_dry), sizeof(data.Canopy_alb_dry)); + is.read(reinterpret_cast(&data.Canopy_alb_wet), sizeof(data.Canopy_alb_wet)); + is.read(reinterpret_cast(&data.Canopy_alb_snow), sizeof(data.Canopy_alb_snow)); + is.read(reinterpret_cast(&data.Emissivity_soil), sizeof(data.Emissivity_soil)); is.read(reinterpret_cast(&data.WindScalingFactor), sizeof(data.WindScalingFactor)); is.read(reinterpret_cast(&data.ErosionLevel), sizeof(data.ErosionLevel)); is.read(reinterpret_cast(&data.TimeCountDeltaHS), sizeof(data.TimeCountDeltaHS)); + return is; } const std::string SN_SNOWSOIL_DATA::toString() const { - std::stringstream os; + std::ostringstream os; os << "\n"; os << meta.toString() << "\n"; - os << 
"profileDate: " << profileDate.toString(Date::ISO) << "\n"; - os << "nN: " << nN << "\n"; - os << "Height: " << Height << "\n"; - os << "nLayers: " << nLayers << "\n"; + os << "profileDate: " << profileDate.toString(Date::ISO) << "\n"; + os << "nN: " << nN << "\n"; + os << "Height: " << Height << "\n"; + os << "nLayers: " << nLayers << "\n"; for(size_t ii=0; ii\n"; return os.str(); @@ -2786,7 +3490,7 @@ const std::string SN_SNOWSOIL_DATA::toString() const const std::string SurfaceFluxes::toString() const { - std::stringstream os; + std::ostringstream os; os << "" << "\n"; os << std::setprecision(10); os << "Long wave: lw_in=" << lw_in << " lw_out=" << lw_out << " lw_net=" << lw_net << "\n"; @@ -2814,14 +3518,14 @@ const std::string SurfaceFluxes::toString() const } LayerData::LayerData() : depositionDate(), hl(0.), ne(0), tl(0.), - phiSoil(0.), phiIce(0.), phiWater(0.), phiVoids(0.), + phiSoil(0.), phiIce(0.), phiIceReservoir(0.), phiIceReservoirCumul(0.), phiWater(0.), phiWaterPref(0.), phiVoids(0.), cSoil(SnowStation::number_of_solutes), cIce(SnowStation::number_of_solutes), cWater(SnowStation::number_of_solutes), cVoids(SnowStation::number_of_solutes), SoilRho(0.), SoilK(0.), SoilC(0.), - rg(0.), sp(0.), dd(0.), rb(0.), mk(0), hr(0.), CDot(0.), metamo(0.) + rg(0.), sp(0.), dd(0.), rb(0.), mk(0), hr(0.), CDot(0.), metamo(0.), salinity(0.), h(Constants::undefined), dsm(0.) 
{ } -std::iostream& operator<<(std::iostream& os, const LayerData& data) +std::ostream& operator<<(std::ostream& os, const LayerData& data) { os << data.depositionDate; os.write(reinterpret_cast(&data.hl), sizeof(data.hl)); @@ -2829,7 +3533,10 @@ std::iostream& operator<<(std::iostream& os, const LayerData& data) os.write(reinterpret_cast(&data.tl), sizeof(data.tl)); os.write(reinterpret_cast(&data.phiSoil), sizeof(data.phiSoil)); os.write(reinterpret_cast(&data.phiIce), sizeof(data.phiIce)); + os.write(reinterpret_cast(&data.phiIceReservoir), sizeof(data.phiIceReservoir)); + os.write(reinterpret_cast(&data.phiIceReservoirCumul), sizeof(data.phiIceReservoirCumul)); os.write(reinterpret_cast(&data.phiWater), sizeof(data.phiWater)); + os.write(reinterpret_cast(&data.phiWaterPref), sizeof(data.phiWaterPref)); os.write(reinterpret_cast(&data.phiVoids), sizeof(data.phiVoids)); const size_t s_csoil = data.cSoil.size(); @@ -2860,10 +3567,13 @@ std::iostream& operator<<(std::iostream& os, const LayerData& data) os.write(reinterpret_cast(&data.hr), sizeof(data.hr)); os.write(reinterpret_cast(&data.CDot), sizeof(data.CDot)); os.write(reinterpret_cast(&data.metamo), sizeof(data.metamo)); + os.write(reinterpret_cast(&data.salinity), sizeof(data.salinity)); + os.write(reinterpret_cast(&data.h), sizeof(data.h)); + os.write(reinterpret_cast(&data.dsm), sizeof(data.dsm)); return os; } -std::iostream& operator>>(std::iostream& is, LayerData& data) +std::istream& operator>>(std::istream& is, LayerData& data) { is >> data.depositionDate; is.read(reinterpret_cast(&data.hl), sizeof(data.hl)); @@ -2871,7 +3581,10 @@ std::iostream& operator>>(std::iostream& is, LayerData& data) is.read(reinterpret_cast(&data.tl), sizeof(data.tl)); is.read(reinterpret_cast(&data.phiSoil), sizeof(data.phiSoil)); is.read(reinterpret_cast(&data.phiIce), sizeof(data.phiIce)); + is.read(reinterpret_cast(&data.phiIceReservoir), sizeof(data.phiIceReservoir)); + 
is.read(reinterpret_cast(&data.phiIceReservoirCumul), sizeof(data.phiIceReservoirCumul)); is.read(reinterpret_cast(&data.phiWater), sizeof(data.phiWater)); + is.read(reinterpret_cast(&data.phiWaterPref), sizeof(data.phiWaterPref)); is.read(reinterpret_cast(&data.phiVoids), sizeof(data.phiVoids)); size_t s_csoil; @@ -2906,140 +3619,26 @@ std::iostream& operator>>(std::iostream& is, LayerData& data) is.read(reinterpret_cast(&data.hr), sizeof(data.hr)); is.read(reinterpret_cast(&data.CDot), sizeof(data.CDot)); is.read(reinterpret_cast(&data.metamo), sizeof(data.metamo)); + is.read(reinterpret_cast(&data.salinity), sizeof(data.salinity)); + is.read(reinterpret_cast(&data.h), sizeof(data.h)); + is.read(reinterpret_cast(&data.dsm), sizeof(data.dsm)); return is; } const std::string LayerData::toString() const { - std::stringstream os; + std::ostringstream os; os << "\n"; os << depositionDate.toString(mio::Date::ISO) << "\n"; os << "\theight:" << hl << " (" << ne << "elements) at " << tl << "K\n"; - os << "\tvolumetric contents: " << phiIce << " ice, " << phiWater << " water, " << phiVoids << " voids, "; -// os << phiSoil << " soil, total = " << phiIce+phiWater+phiVoids+phiSoil << "%\n"; + os << "\tvolumetric contents: " << phiIce << " ice, " << phiWater << " water, " << phiWaterPref << " water_pref, " << phiVoids << " voids, "; +// os << phiSoil << " soil, total = " << phiIce+phiWater+phiWaterPref+phiVoids+phiSoil << "%\n"; os << "\tSoil properties: " << SoilRho << " kg/m^3, " << SoilK << " W/(m*K), " << SoilC << " J/K\n"; os << "\tSoil microstructure: rg=" << rg << " sp=" << sp << " dd=" << dd << " rb=" << rb << " mk=" << mk << "\n"; - os << "\tStability: surface hoar=" << hr << " kg/m^2, stress rate=" << CDot << " Pa/s, metamo=" << metamo << "\n"; + os << "\tStability: surface hoar=" << hr << " kg/m^2, stress rate=" << CDot << " Pa/s, metamo=" << metamo << "dsm=" << dsm << "\n"; os << "\tNumber of solutes: " << cSoil.size() << " in soil, " << cIce.size() << " in ice, " 
<< cWater.size() << " in water, " << cVoids.size() << " in voids\n"; os << "\n"; return os.str(); } - -/// @brief To be set while using the explicit metamorphism model to output ML2L and lp on tagged elements -const bool Tag::metamo_expl = false; - -Tag::Tag() - : label(), date(), elem(static_cast(-1)), previous_depth(IOUtils::nodata), - etaNS(IOUtils::nodata), etaMSU(IOUtils::nodata), ML2L(IOUtils::nodata), lp(IOUtils::nodata) -{} - -/** - * @brief Compute tag properties - * @author Charles Fierz - * @version 10.05 - * @param Edata - */ -void Tag::compute_properties(const ElementData& Edata) -{ - etaNS = SnLaws::NewSnowViscosityLehning(Edata); - etaMSU = SnLaws::SnowViscosityMSU(Edata); - - // set ML2L and lp to NODATA if not using the explicit metamorphism model - if (!Tag::metamo_expl) { - ML2L = lp = IOUtils::nodata; - } -} - -/** - * @brief Reposition tag - * @author Charles Fierz - * @version 10.03 - * @bug Don't be surprised ... - * @param useSoilLayers - * @param z Position of corresponding sensor perpendicular to slope (m) - * @param Xdata - */ -void Tag::reposition_tag(const bool&, const double& z, SnowStation& Xdata) -{ - //HACK: double z_pos = getPerpSensorPosition(useSoilLayers, z, Xdata.cH, Xdata.Ground, Xdata.meta.getSlopeAngle()); - - //INITIAL_HS = Xdata.cH; //HACK: why set this value here? 
- Xdata.Edata[elem].mk %= 100; - - const size_t n_up = findUpperNode(z, Xdata.Ndata, Xdata.getNumberOfNodes()); // Upper node number - - elem = n_up - 1; - compute_properties(Xdata.Edata.at(n_up-1)); -} - -TaggingData::TaggingData(const double& i_calculation_step_length) - : useSoilLayers(false), surface_write(false), - calculation_step_length(i_calculation_step_length), - tag_low(1), tag_top(99), repos_low(1), repos_top(99), tags(), number_tags(0) -{} - -void TaggingData::resize(size_t i_size) -{ - if ((i_size != IOUtils::npos) && (i_size > 0)) { - tags.resize(i_size); - number_tags = i_size - 1; - } else { - //throw exception - } -} - -/** - * @brief Update tags - * -# Event driven tagging according to TAG_EVENT - * -# Tagging of surface element on given date - * @author Charles Fierz - * @version 10.04 - * @param Mdata - * @param Xdata - */ -void TaggingData::update_tags(const CurrentMeteo& Mdata, SnowStation& Xdata) -{ - const bool TAG_EVENT = false; - - if ( (tags.back().date == Date()) && TAG_EVENT ) { - tags.back().date = Mdata.date; - } - - for(size_t tag = 1; tag <= number_tags; tag++) { //HACK: check indices - const size_t e = Xdata.find_tag(tag); - if (e != IOUtils::npos) { - tags[tag-1].elem = e; - tags[tag-1].compute_properties(Xdata.Edata[e]); - - } else if ((Xdata.Edata.back().mk < 100) && (Mdata.date >= tags[tag-1].date) - && (Mdata.date < (tags[tag-1].date + M_TO_D(calculation_step_length))) ) { - Xdata.Edata.back().mk += tag*100; - tags[tag-1].compute_properties(Xdata.Edata.back()); - } else { - //??? 
- continue; - } - - if ((tag >= repos_low) && (tag <= repos_top)) { - const size_t depth = Mdata.getNumberFixedPositions() + tag - 1; - - if ((Mdata.zv_ts[depth] > tags[tag-1].previous_depth)) { - tags[tag-1].reposition_tag(useSoilLayers, Mdata.zv_ts[depth], Xdata); - } - tags[tag-1].previous_depth = Mdata.zv_ts[depth]; - } - } - - for (size_t tag = repos_low; tag <= repos_top; tag++) { // TODO make sure that no marker has been overwritten - if ( Xdata.Edata[tags[tag-1].elem].mk < 100 ) { - Xdata.Edata[tags[tag-1].elem].mk += tag*100; - } - } - - if ( surface_write ) { // There ARE NUMBER_TAGS tags structures!!! - tags[number_tags].compute_properties(Xdata.Edata.back()); - } - -} diff --git a/third_party/snowpack/DataClasses.h b/third_party/snowpack/DataClasses.h index d8503b92..711298b8 100644 --- a/third_party/snowpack/DataClasses.h +++ b/third_party/snowpack/DataClasses.h @@ -27,6 +27,8 @@ #define DATACLASSES_H #include "SnowpackConfig.h" +#include "vanGenuchten.h" +#include "snowpackCore/SeaIce.h" #include "Constants.h" #include @@ -65,8 +67,8 @@ class ZwischenData { ZwischenData(): hoar24(48, 0.0), drift24(48, 0.0), hn3(144, 0.0), hn24(144, 0.0) {} void reset(); ///< Sets all the values in the vectors to 0.0 - friend std::iostream& operator<<(std::iostream& os, const ZwischenData& data); - friend std::iostream& operator>>(std::iostream& is, ZwischenData& data); + friend std::ostream& operator<<(std::ostream& os, const ZwischenData& data); + friend std::istream& operator>>(std::istream& is, ZwischenData& data); std::vector hoar24; ///< Twenty-four hour hoar index every half-hour over one day 48 std::vector drift24; ///< Twenty-four hour hoar index every half-hour over one day 48 @@ -82,6 +84,7 @@ class CurrentMeteo { public: CurrentMeteo(); CurrentMeteo(const SnowpackConfig& i_cfg); + void reset(const SnowpackConfig& i_cfg); void setMeasTempParameters(const mio::MeteoData& md); size_t getNumberMeasTemperatures() const; @@ -93,8 +96,8 @@ class CurrentMeteo { 
void copySolutes(const mio::MeteoData& md, const size_t& i_number_of_solutes); const std::string toString() const; - friend std::iostream& operator<<(std::iostream& os, const CurrentMeteo& data); - friend std::iostream& operator>>(std::iostream& is, CurrentMeteo& data); + friend std::ostream& operator<<(std::ostream& os, const CurrentMeteo& data); + friend std::istream& operator>>(std::istream& is, CurrentMeteo& data); mio::Date date; ///< Date of current meteo data double ta; ///< Air temperature (K) @@ -117,21 +120,28 @@ class CurrentMeteo { double dir_h; ///< Horizontal direct radiation from the sky (W m-2) double elev; ///< Solar elevation to be used in Canopy.c (rad) => see also double ea; ///< Atmospheric emissivity (1) + double lw_net; ///< Net longwave radiation (W m-2) double tss; ///< Snow surface temperature (K) double tss_a12h; ///< Snow surface temperature averaged over past 12 hours (K) double tss_a24h; ///< Snow surface temperature averaged over past 24 hours (K) double ts0; ///< Bottom temperatures of snow/soil pack (K) - double psum; ///< precipitation sum over the current timestep - double psum_ph; ///< precipitation phase for the current timestep (between 0 and 1, 0 is fully solid while 1 is fully liquid). + double psum; ///< precipitation sum over the current timestep (mm) + double psum_ph; ///< precipitation phase for the current timestep (between 0 and 1, 0 is fully solid while 1 is fully liquid). 
+ double psum_tech;///< Equivalent precipitation water sum for technical snow over the current timestep (mm) double hs; ///< The measured height of snow (m) double hs_a3h; ///< Snow depth averaged over 3 past hours double hs_rate; ///< The rate of change in snow depth (m h-1) + double geo_heat; ///< Geo heat flux (W/m^2), for the neumann lower boundary condition in the heat equation double adv_heat; ///< Advective heat to inject in the soil (if ADVECTIVE_HEAT and related parameters set to true) std::vector ts; ///< Measured snow or/and soil temperatures (K) std::vector zv_ts; ///< Positions of all measured snow or/and soil temperatures (m) std::vector conc; ///< Solute concentrations in precipitation double rho_hn; ///< Measured new snow density (kg m-3) + double rime_hn; ///< riming index of new snow + double lwc_hn; ///< liquid water content of new snow + + bool poor_ea; ///< when ilwr has not been measured nor parametrized in good conditions, it could be redone later on private: size_t getNumberMeasTemperatures(const mio::MeteoData& md); @@ -153,10 +163,11 @@ enum SN_FIELD{ /// @brief The 4 different components of the porous matrix enum { - SOIL, ///< Soil - ICE, ///< Ice - WATER, ///< Water - AIR, ///< Air + SOIL, ///< Soil + ICE, ///< Ice + WATER, ///< Water + WATER_PREF, ///< Water in preferential flow + AIR, ///< Air N_COMPONENTS }; @@ -169,16 +180,17 @@ enum SN_SOIL_DATA{ }; /** - * @brief Parameters of the different layers of the snowpack \n - * The layers form a vector within the SSdata (profile) data structure. + * @brief Parameters of the different layers of the snowpack. + * @details The layers form a vector within the SSdata (profile) data structure. 
+ * This is only used by SN_SNOWSOIL_DATA and filled in the plugins */ class LayerData { public: LayerData(); const std::string toString() const; - friend std::iostream& operator<<(std::iostream& os, const LayerData& data); - friend std::iostream& operator>>(std::iostream& is, LayerData& data); + friend std::ostream& operator<<(std::ostream& os, const LayerData& data); + friend std::istream& operator>>(std::istream& is, LayerData& data); mio::Date depositionDate; ///< Date of deposition (mainly used for snow layers) double hl; ///< The thickness of the layer in m @@ -186,7 +198,10 @@ class LayerData { double tl; ///< Temperature at the top of the layer in K double phiSoil; ///< Volumetric soil content in % double phiIce; ///< Volumetric ice content in % + double phiIceReservoir; ///< Volumetric ice reservoir content in % + double phiIceReservoirCumul;///< Volumetric cumulated ice reservoir content in % double phiWater; ///< Volumetric water content in % + double phiWaterPref; ///< Volumetric preferential water content in % double phiVoids; ///< Volumetric void content in % std::vector cSoil; ///< Solute concentrations in Soil std::vector cIce; ///< Solute concentrations in Ice @@ -203,26 +218,33 @@ class LayerData { double hr; ///< Surface hoar Mass in kg m-2 double CDot; ///< Stress rate (Pa s-1), that is the LAST overload change rate double metamo; ///< keep track of metamorphism + double salinity; ///< bulk salinity (g/kg) + double h; ///< capillary pressure head (m) + double dsm; ///< dry snow metamorphism factor }; /** - * @brief SN_SNOWSOIL_DATA includes all important station parameters as well as LayerData \n - * This data structure will have to be replaced by something a little more complicated soon ??? + * @brief SN_SNOWSOIL_DATA includes all important station parameters as well as LayerData. + * @details This data structure will have to be replaced by something a little more complicated soon ??? 
* For now it is simply an efficient way of creating a snowpack to investigate. */ class SN_SNOWSOIL_DATA { public: SN_SNOWSOIL_DATA() : meta(), profileDate(), nN(0), Height(0.), - nLayers(0), Ldata(), HS_last(0.), Albedo(0.), SoilAlb(0.), BareSoil_z0(0.), - Canopy_Height(0.), Canopy_LAI(0.), Canopy_BasalArea(0.004), Canopy_Direct_Throughfall(0.), - WindScalingFactor(1.), ErosionLevel(0), TimeCountDeltaHS(0.) - { - Ldata.clear(); - } + nLayers(0), Ldata(), HS_last(0.), Albedo(mio::IOUtils::nodata), + SoilAlb(mio::IOUtils::nodata), BareSoil_z0(mio::IOUtils::nodata), + Canopy_Height(mio::IOUtils::nodata), Canopy_LAI(mio::IOUtils::nodata), + Canopy_Direct_Throughfall(mio::IOUtils::nodata), WindScalingFactor(1.), + ErosionLevel(static_cast(mio::IOUtils::nodata)), TimeCountDeltaHS(mio::IOUtils::nodata), + Canopy_BasalArea(mio::IOUtils::nodata), Canopy_diameter(mio::IOUtils::nodata), + Canopy_lai_frac_top_default(mio::IOUtils::nodata),Canopy_int_cap_snow(mio::IOUtils::nodata), + Canopy_alb_dry(mio::IOUtils::nodata),Canopy_alb_wet(mio::IOUtils::nodata), + Canopy_alb_snow(mio::IOUtils::nodata),Emissivity_soil(mio::IOUtils::nodata) + {} const std::string toString() const; - friend std::iostream& operator<<(std::iostream& os, const SN_SNOWSOIL_DATA& data); - friend std::iostream& operator>>(std::iostream& is, SN_SNOWSOIL_DATA& data); + friend std::ostream& operator<<(std::ostream& os, const SN_SNOWSOIL_DATA& data); + friend std::istream& operator>>(std::istream& is, SN_SNOWSOIL_DATA& data); mio::StationData meta; ///< Station meta data mio::Date profileDate; ///< Date of profile @@ -231,16 +253,28 @@ class SN_SNOWSOIL_DATA { size_t nLayers; ///< Total number of soil and snow layers at loading std::vector Ldata; ///< contains all the information required to construct the Xdata double HS_last; ///< Last checked calculated snow depth used for albedo control + /// REQUIRED PARAMETERS, an error will be thrown at reading (SnowpackIO) if no parameter are provided double Albedo; ///< 
Snow albedo double SoilAlb; ///< Soil albedo, default 0.2 double BareSoil_z0; ///< Bare soil roughness in m, default 0.02 m double Canopy_Height; ///< Canopy Height in m double Canopy_LAI; ///< Canopy Leaf Area Index in m2 m-2 - double Canopy_BasalArea; ///< Canopy Basal Area in m2 m-2 double Canopy_Direct_Throughfall; ///< Direct throughfall [fraction of precipitation] double WindScalingFactor; ///< Local scaling factor for wind at drift station int ErosionLevel; ///< Erosion Level in operational mode (flat field virtual erosion) double TimeCountDeltaHS; ///< Time counter tracking erroneous settlement in operational mode + /// OPTIONAL PARAMETERS, a warning will be thrown in CANOPY::Initialize if no value is provided + double Canopy_BasalArea; ///< Canopy Basal Area in m2 m-2 + double Canopy_diameter; ///< Average canopy (tree) diameter [m], parameter in the new radiation transfer model + double Canopy_lai_frac_top_default; ///< fraction of total LAI that is attributed to the uppermost layer. Here calibrated for Alptal. 
+ double Canopy_int_cap_snow; ///< Specific interception capacity for rain (I_LAI) (mm/LAI) + double Canopy_alb_dry; // Albedo of dry canopy (calibr: 0.09, Alptal) + double Canopy_alb_wet; // Albedo of wet canopy (calibr: 0.09, Alptal) + double Canopy_alb_snow; // Albedo of snow covered albedo (calibr: 0.35, Alptal) + /// OPTIONAL PARAMETERS, if not provided Constants::emissivity_soil will be used + double Emissivity_soil; + + }; /** @@ -258,16 +292,20 @@ class ElementData { Exp ///< exponential law } Young_Modulus; - ElementData(); + ElementData(const unsigned short int& in_ID); + ElementData(const ElementData& cc); //required to get the correct back-reference in vanGenuchten object + ElementData& operator=(const ElementData&) = default; ///>(std::iostream& is, ElementData& data); + friend std::ostream& operator<<(std::ostream& os, const ElementData& data); + friend std::istream& operator>>(std::istream& is, ElementData& data); mio::Date depositionDate; ///< Date of deposition double L0, L; ///< Original and present element thickness (m) double Te; ///< mean element temperature (K) double gradT; ///< temperature gradient over element (K m-1) - double melting_tk; ///< melt temperature of layer (principally initialized as 0 degC, but enables possibility for freezing point depression) - double freezing_tk; ///< freezing temperature of layer (principally initialized as 0 degC, but enables possibility for freezing point depression) - std::vector theta; ///< volumetric contents: SOIL, ICE, WATER, AIR (1) + double meltfreeze_tk; ///< melt/freeze temperature of layer (principally initialized as 0 degC, but enables possibility for freezing point depression) + std::vector theta; ///< volumetric contents: SOIL, ICE, WATER, WATER_PREF, AIR (1) + double h; ///< capillary pressure head (m) mio::Array2D conc; ///< Concentration for chemical constituents in (kg m-3) std::vector k; ///< For example, heat conductivity of TEMPERATURE field (W m-1 K-1) // Stored in order to visualize 
constitutive laws @@ -312,6 +350,7 @@ class ElementData { size_t mk; ///< grain marker (history dependent) unsigned short int type; ///< grain class double metamo; ///< keep track of metamorphism + double salinity; ///< bulk salinity (PSU, which is g/kg) double dth_w; ///< Subsurface Melting & Freezing Data: change of water content double res_wat_cont; ///< Residual water content double Qmf; ///< Subsurface Melting & Freezing Data: change of energy due to phase changes (melt-freeze) @@ -327,21 +366,39 @@ class ElementData { double hard; ///< Parameterized hand hardness (1) double S_dr; ///< Stability Index based on deformation rate (Direct Action Avalanching) double crit_cut_length; ///< Critical cut length (m) - double theta_r; ///< Residual water content of previous time step (m^3/m^3), used exclusively for solving Richards equation in snow - double lwc_source; ///< Source/sink term for Richards equation + vanGenuchten VG; ///< Van Genuchten Model for water retention + double lwc_source; ///< Source/sink term for Richards equation (m^3/m^3 / timestep) + double PrefFlowArea; ///< Preferential flow path relative area (-) + double theta_w_transfer; ///< Volumetric content of water transferred from preferential flow to matrix domain (1) + double theta_i_reservoir; ///< Volumetric ice content in ice reservoir (1) + double theta_i_reservoir_cumul; ///< Volumetric ice content in cumulated ice reservoir (1) + double SlopeParFlux; ///< Slope parallel flux (m^3/m^3 * m / timestep) + double Qph_up; ///< Heat source/sink due to phase changes for the heat equation (W/m^3), at the upper node of the element + double Qph_down; ///< Heat source/sink due to phase changes for the heat equation (W/m^3), at the lower node of the element //NIED (H. 
Hirashima) - double dhf; + double dsm; ///< Dry snow metamorphism factor + double rime; ///< Rime index + + unsigned short int ID; ///< Element ID used to track elements + static const unsigned short int noID; + + double rhov; ///< vapor density...(kg/m^3) + double Qmm; ///< Heat source/sink due to phase changes in the case of vapor transport (W/m^3) + double vapTrans_fluxDiff; ///< vapor dissusion flux in the case of vapor transport (W/m^2/s) + double vapTrans_snowDenChangeRate; ///< snow density change rate in the case of vapor transport (kg/m^3/s) + double vapTrans_cumulativeDenChange; ///< cumulative density change in the case of vapor transport (kg/m^3) + double vapTrans_underSaturationDegree; ///< the degree of undersaturation, (rhov-rohv_sat)/rhov_sat (-) }; /// @brief NODAL DATA used as a pointer in the SnowStation structure class NodeData { public: NodeData() : z(0.), u(0.), f(0.), udot(0.), T(0.), S_n(0.), S_s(0.), ssi(6.), hoar(0.), - dhf(0.), S_dhf(0.), Sigdhf(0.) {} //HACK: set ssi to max_stability! + dsm(0.), S_dsm(0.), Sigdsm(0.), rime(0.), water_flux(0.), rhov(0.) {} //HACK: set ssi to max_stability! const std::string toString() const; - friend std::iostream& operator<<(std::iostream& os, const NodeData& data); - friend std::iostream& operator>>(std::iostream& is, NodeData& data); + friend std::ostream& operator<<(std::ostream& os, const NodeData& data); + friend std::istream& operator>>(std::istream& is, NodeData& data); double z; ///< nodal height from ground in m double u; ///< creep displacements in m @@ -354,9 +411,14 @@ class NodeData { double hoar; ///< Mass of surface hoar collected while node was exposed to surface //NIED (H. Hirashima) - double dhf; - double S_dhf; - double Sigdhf; + double dsm; ///< Dry snow metamorphism factor + double S_dsm; + double Sigdsm; + double rime; + + double water_flux; ///< Water flowing through the node (kg/m2). Positive values denote downward fluxes. 
+ + double rhov; ///< nodal vapor density in kg/m^3 }; /** @@ -375,24 +437,102 @@ class NodeData { */ class CanopyData { public: - CanopyData() : storage(0.), temp(0.), sigf(0.), ec(0.), lai(0.), z0m(0.), z0h(0.), zdispl(0.), - height(0.), direct_throughfall(0.), ra(0.), rc(0.), rs(0.), rstransp(0.), canopyalb(0.), - totalalb(0.), wetfraction(0.), intcapacity(0.), rswrac(0.), iswrac(0.), rswrbc(0.), - iswrbc(0.), ilwrac(0.), rlwrac(0.), ilwrbc(0.), rlwrbc(0.), rsnet(0.), rlnet(0.), - sensible(0.), latent(0.), latentcorr(0.), transp(0.), intevap(0.), - interception(0.), throughfall(0.), snowunload(0.), - snowfac(0.), rainfac(0.),liquidfraction(0.), - sigftrunk(0), Ttrunk(0.), CondFluxCanop(0.), CondFluxTrunks(0.), - LWnet_Trunks(0.), SWnet_Trunks(0.), QStrunks(0.), - forestfloor_alb(0.), BasalArea(0), HMLeaves(0.), HMTrunks(0.) {} - + CanopyData() : int_cap_snow(0.), int_cap_rain(0.), interception_timecoef(0.), can_alb_dry(0.), + can_alb_wet(0.), can_alb_snow(0.), krnt_lai(0.), can_diameter(0.), biomass_heat_capacity(0.), + biomass_density(0.), lai_frac_top_default(0.), trunk_frac_height(0.), trunkalb(0.), et(0.), + canopy_stabilitycorrection(true), roughmom_to_canopyheight_ratio(0.), displ_to_canopyheight_ratio(0.), + raincrease_snow(0.), canopytemp_maxchange_perhour(0.), roughheat_to_roughmom_ratio(0.), + can_ch0(0.), can_rs_mult(0.), rsmin(0.), f3_gd(0.), rootdepth(0.), wp_fraction(0.), + h_wilt(0.), storage(0.), temp(0.), sigf(0.), ec(0.), lai(0.), z0m(0.), z0h(0.), zdispl(0.), + height(0.), direct_throughfall(0.), ra(0.), rc(0.), rs(0.), rstransp(0.), canopyalb(0.), + totalalb(0.), wetfraction(0.), intcapacity(0.), rswrac(0.), iswrac(0.), rswrbc(0.), iswrbc(0.), + ilwrac(0.), rlwrac(0.), ilwrbc(0.), rlwrbc(0.), rsnet(0.), rlnet(0.), sensible(0.), latent(0.), + latentcorr(0.), transp(0.), intevap(0.), interception(0.), throughfall(0.), snowunload(0.), + snowfac(0.), rainfac(0.), liquidfraction(0.), sigftrunk(0.), Ttrunk(0.), CondFluxCanop(0.), + 
CondFluxTrunks(0.), LWnet_Trunks(0.), SWnet_Trunks(0.), QStrunks(0.), forestfloor_alb(0.), + BasalArea(0.), HMLeaves(0.), HMTrunks(0.) {} + + void initialize(const SN_SNOWSOIL_DATA& SSdata, const bool useCanopyModel, const bool isAlpine3D); void reset(const bool& cumsum_mass); void initializeSurfaceExchangeData(); void multiplyFluxes(const double& factor); const std::string toString() const; - friend std::iostream& operator<<(std::iostream& os, const CanopyData& data); - friend std::iostream& operator>>(std::iostream& is, CanopyData& data); + friend std::ostream& operator<<(std::ostream& os, const CanopyData& data); + friend std::istream& operator>>(std::istream& is, CanopyData& data); + + /** + * CANOPY PARAMETERS DEFAULT VALUES + * These values can be changed in the .sno file + * brief History of changed values: + * - 2007-12-20: update based on data from all SnowMIP2 sites, and calibration using Alptal data + * brief Specific interception capacity for snow (i_LAI) (mm/LAI) \n + * Please note that this parameter is further multiplied with (0.27+46/new_snow_density[Ta]) following (Pomeroy et al, Hydr. Proc. 
1998) + * - 5.9 Spruce and 6.6 Pine (Schmidt&Glums,CanJForRes,1991) + */ + double int_cap_snow; //iMax in Gouttevin,2015 + /// Specific interception capacity for rain (I_LAI) (mm/LAI) + double int_cap_rain; + /** Coef in interception function, see (Pomeroy et al,1998) where a value of 0.7 was + * found to be appropriate for hourly time-step, but smaller time steps require smaller + * values, 0.5 was found reasoanble by using the SnowMIP2 data (2007-12-09) + */ + double interception_timecoef; + + /// RADIATION BALANCE + double can_alb_dry; // Albedo of dry canopy (calibr: 0.09, Alptal) + double can_alb_wet; // Albedo of wet canopy (calibr: 0.09, Alptal) + double can_alb_snow; // Albedo of snow covered albedo (calibr: 0.35, Alptal) + double krnt_lai; // Radiation transmissivity parameter, in the range 0.4-0.8 if the true LAI is used; higher if optical LAI is used. + // (calibrated on Alptal) + double can_diameter; // average canopy (tree) diameter [m], parameter in the new radiation transfer model + /// ENERGY BALANCE + /// parameters for HeatMass and 2layercanopy + double biomass_heat_capacity; // from Linroth et al., 2013 (J Kg-1 K-1) + double biomass_density; // from Linroth et al., 2013 (Kg m-3) + double lai_frac_top_default; // fraction of total LAI that is attributed to the uppermost layer. Here calibrated for Alptal. + double trunk_frac_height; // (optional) fraction of total tree height occupied by trunks, + // used to calculate direct solar insolation of trunks. + double trunkalb; // trunk albedo + double et; // trunk emissivity + /// TURBULENT HEAT EXCHANGE + /// Stab. corr. aerodyn. resist. above and below canopy: 0=off and 1=on (Monin-Obukhov formulation) + bool canopy_stabilitycorrection; + /// Ratio between canopy height and roughness length + double roughmom_to_canopyheight_ratio; + /// As above for displacement height + double displ_to_canopyheight_ratio; + /** + * Fractional increase of aerodynamic resistance for evaporation of intercepted snow. 
+ * - 10.0 from Koivusalo and Kokkonen (2002) + * - 8.0 calibration with Alptal data + */ + double raincrease_snow; + + /// @brief Maximum allowed canopy temperature change (K hr-1) + double canopytemp_maxchange_perhour; + /// @brief (~=1, but Not allowed to be exactly 1) + double roughheat_to_roughmom_ratio; + /// @brief minimum heat exchange (Wm-2K-1) at zero wind + double can_ch0; + /// @brief 1+CAN_RS_MULT = maximum factor to increase Cdata->rs below canopy + double can_rs_mult; + /// @brief TRANSPIRATION + /// @brief Minimum canopy surface resistance, 500 (sm-1) is for needle leaf treas van den Hurk et al (2000) *75% Gustafsson et al (2003) + double rsmin; + /** + * @brief gd (Pa-1) parameter for canopy surface resistance response to vapour pressure: + * - 0.0003 = trees (needle or broadleafs) + * - 0=crops, grass, tundra etc + */ + double f3_gd; + /// @brief Root depth, determining the soil layers influenced by root water uptake + double rootdepth; + /// @brief Wilting point, defined as a fraction of water content at field capacity (-) + double wp_fraction; + /// @brief Wilting point pressure head, when using Richards equation for soil. + double h_wilt; + //@} // State variable double storage; ///< intercepted water (mm or kg m-2) @@ -438,21 +578,21 @@ class CanopyData { double interception; double throughfall; double snowunload; - double snowfac; ///< snowfall above canopy - double rainfac; ///< rainfall above canopy + + double snowfac; ///< snowfall above canopy + double rainfac; ///< rainfall above canopy double liquidfraction; - double sigftrunk; ///< radiation interception cross section for trunk layer () - double Ttrunk; ///< trunk temperature (K) - double CondFluxCanop; ///< biomass heat storage flux towards Canopy (if 1L) towards Leaves (if 2L). 
(>0 towards canopy) - double CondFluxTrunks; ///< biomass heat storage flux towards Trunks (if 2L) - double LWnet_Trunks; ///< net LW to trunks (>0 towards trunks) - double SWnet_Trunks; ///< net SW to trunks (>0 towards trunks) - double QStrunks; ///< sensible heat flux from trunks (>0 if heat lost from trunk) + double sigftrunk; ///< radiation interception cross section for trunk layer () + double Ttrunk; ///< trunk temperature (K) + double CondFluxCanop; ///< biomass heat storage flux towards Canopy (if 1L) towards Leaves (if 2L). (>0 towards canopy) + double CondFluxTrunks; ///< biomass heat storage flux towards Trunks (if 2L) + double LWnet_Trunks; ///< net LW to trunks (>0 towards trunks) + double SWnet_Trunks; ///< net SW to trunks (>0 towards trunks) + double QStrunks; ///< sensible heat flux from trunks (>0 if heat lost from trunk) double forestfloor_alb; ///< albedo of the forest floor - double BasalArea; ///< basal area of trees on the stand - double HMLeaves; ///< Leaves heat mass (J K-1 /m2 ground surface) - double HMTrunks; ///< Trunks heat mass (J K-1 /m2 ground surface) - + double BasalArea; ///< basal area of trees on the stand + double HMLeaves; ///< Leaves heat mass (J K-1 /m2 ground surface) + double HMTrunks; ///< Trunks heat mass (J K-1 /m2 ground surface) }; /** @@ -461,9 +601,11 @@ class CanopyData { * It is used extensively not only during the finite element solution but also to control * the post-processing writes. It is initialized from SN_SNOWSOIL_DATA (at present). 
*/ +class SeaIce; // Forward-declare sea ice class class SnowStation { public: - explicit SnowStation(const bool& i_useCanopyModel=true, const bool& i_useSoilLayers=true); + explicit SnowStation(const bool i_useCanopyModel=true, const bool i_useSoilLayers=true, + const bool i_isAlpine3D=false, const bool i_useSeaIceModule=false); SnowStation(const SnowStation& c); ~SnowStation(); @@ -473,37 +615,45 @@ class SnowStation { void resize(const size_t& number_of_elements); void reduceNumberOfElements(const size_t& rnE); - void combineElements(const size_t& number_top_elements, const bool& reduce_n_elements); - void combineElements(const size_t& number_top_elements, const bool& reduce_n_elements, const size_t& cond); - static bool combineCondition(const ElementData& Edata0, const ElementData& Edata1, const double& depth, const bool& reduce_n_elements); + void combineElements(const size_t& number_top_elements, const bool& reduce_n_elements, const size_t& cond, const double& comb_thresh_l); + static bool combineCondition(const ElementData& Edata0, const ElementData& Edata1, const double& depth, const bool& reduce_n_elements, const double& comb_thresh_l); static void mergeElements(ElementData& Edata0, const ElementData& Edata1, const bool& merge, const bool& topElement); - void splitElements(); + void splitElement(const size_t& e); //Split an element + void splitElements(const double& max_element_length, const double& comb_thresh_l); //Check for splitting, calls splitElement(...) 
for actual splitting void compSnowpackMasses(); void compSnowpackInternalEnergyChange(const double& sn_dt); void compSoilInternalEnergyChange(const double& sn_dt); double getLiquidWaterIndex() const; double getModelledTemperature(const double& z) const; + double getTotalLateralFlowSnow() const; + double getTotalLateralFlowSoil() const; + void resetSlopeParFlux(); size_t getNumberOfElements() const; size_t getNumberOfNodes() const; bool isGlacier(const bool& hydro=false) const; bool hasSoilLayers() const; + double findMarkedReferenceLayer() const; size_t find_tag(const size_t& tag) const; + void reset_water_fluxes(); + const std::string toString() const; - friend std::iostream& operator<<(std::iostream& os, const SnowStation& data); - friend std::iostream& operator>>(std::iostream& is, SnowStation& data); + friend std::ostream& operator<<(std::ostream& os, const SnowStation& data); + friend std::istream& operator>>(std::istream& is, SnowStation& data); mio::StationData meta; ///< Station meta data double cos_sl; ///< Cosinus of slope angle, initialized once! 
size_t sector; ///< current slope sector of width 360./max(1, nSlopes-1) CanopyData Cdata; ///< Pointer to canopy data + SeaIce* Seaice; ///< Pointer to sea ice class double pAlbedo; ///< Parameterized snow albedo double Albedo; ///< Snow albedo used by the model double SoilAlb; ///< Soil albedo + double SoilEmissivity; ///< Soil emissivity double BareSoil_z0; ///< Bare soil roughness in m size_t SoilNode; ///< The top soil node, 0 in case of SNP_SOIL == 0 double Ground; ///< The ground height -- meaning the height of the top soil node @@ -512,26 +662,28 @@ class SnowStation { double mass_sum; ///< Total mass summing mass of snow elements double swe; ///< Total mass summing snow water equivalent of elements double lwc_sum; ///< Total liquid water in snowpack + double lwc_sum_soil; ///< Total liquid water in soil + double swc_sum_soil; ///< Total solid water in soil double hn; ///< Depth of new snow to be used on slopes double rho_hn; ///< Density of new snow to be used on slopes + double rime_hn; ///< rime of new snow to be used on slopes size_t ErosionLevel; ///< Element where snow erosion stopped previously for the drift index double ErosionMass; ///< Eroded mass either real or virtually (storage if less than one element) char S_class1; ///< Stability class based on hand hardness, grain class ... char S_class2; ///< Stability class based on hand hardness, grain class ... - double S_d; ///< Minimum Direct Action Stability Index ... 
- double z_S_d; ///< Depth of Minimum Direct Action Stability - double S_n; ///< Minimum Natural Stability Index - double z_S_n; ///< Depth of Minimum Natural Stability - double S_s; ///< Minimum Skier Stability Index (SSI) - double z_S_s; ///< Depth of Minimum SSI - double S_4; ///< stab_index4 - double z_S_4; ///< Depth of stab_index4 - double S_5; ///< stab_index5 - double z_S_5; ///< Depth of stab_index5 + double S_d; ///< Minimum deformation rate stability index + double z_S_d; ///< Depth of Minimum S_d + double S_n; ///< Minimum natural stability index + double z_S_n; ///< Depth of Minimum S_n + double S_s; ///< Minimum skier stability index sk38 (Sk38) + double z_S_s; ///< Depth of Minimum S_s + double S_4; ///< placeholder - currently Minimum structural stability index (SSI) + double z_S_4; ///< Depth of Minimum S_4 + double S_5; ///< placeholder + double z_S_5; ///< Depth of Minimum S_5 std::vector Ndata; ///< pointer to nodal data array (e.g. T, z, u, etc..) std::vector Edata; ///< pointer to element data array (e.g. Te, L, Rho, etc..) 
void *Kt; ///< Pointer to pseudo-conductivity and stiffnes matrix - size_t tag_low; ///< Lowest tag to dump, 0 means no tags at all double ColdContent; ///< Cold content of snowpack (J m-2) double ColdContentSoil; ///< Cold content of soil (J m-2) double dIntEnergy; ///< Internal energy change of snowpack (J m-2) @@ -542,8 +694,7 @@ class SnowStation { bool windward; ///< True for windward (luv) slope double WindScalingFactor; ///< Local scaling factor for wind at drift station double TimeCountDeltaHS; ///< Time counter tracking erroneous settlement in operational mode - - static const double comb_thresh_l, comb_thresh_ice, comb_thresh_water; + static const double comb_thresh_l_ratio, comb_thresh_ice, comb_thresh_water; static const double comb_thresh_dd, comb_thresh_sp, comb_thresh_rg; static const double thresh_moist_snow, thresh_moist_soil; static const size_t number_top_elements; @@ -552,8 +703,9 @@ class SnowStation { private: size_t nNodes; ///< Actual number of nodes; different for each exposition size_t nElems; ///< Actual number of elements (nElems=nNodes-1) - bool useCanopyModel, useSoilLayers; ///< The model includes soil layers - static double flexibleMaxElemLength(const double& depth); ///< When using REDUCE_N_ELEMENTS, this function determines the max element length, depending on depth inside the snowpack. + unsigned short int maxElementID; ///< maximum ElementID currently used (so each element can get a unique ID) + bool useCanopyModel, useSoilLayers, isAlpine3D; ///< The model includes soil layers + static double flexibleMaxElemLength(const double& depth, const double& comb_thresh_l); ///< When using REDUCE_N_ELEMENTS, this function determines the max element length, depending on depth inside the snowpack. 
}; /** @@ -564,6 +716,7 @@ class BoundCond { public: BoundCond() : lw_out(0.), lw_net(0.), qs(0.), ql(0.), qr(0.), qg(Constants::undefined) {} const std::string toString() const; + void reset(); double lw_out; ///< outgoing longwave radiation double lw_net; ///< net longwave radiation @@ -589,19 +742,24 @@ class SurfaceFluxes { MS_TOTALMASS, ///< This of course is the total mass of the snowpack at the present time MS_SWE, ///< This too, of course, but summing rho*L MS_WATER, ///< The total amount of water in the snowpack at the present time + MS_WATER_SOIL, ///< The total amount of water in the soil at the present time + MS_ICE_SOIL, ///< The total amount of ice in the soil at the present time MS_HNW, ///< Solid precipitation rate MS_RAIN, ///< Rain rate MS_WIND, ///< Mass loss rate due to wind erosion MS_EVAPORATION, ///< The mass loss or gain of the top element due to water evaporating MS_SUBLIMATION, ///< The mass loss or gain of the top element due to snow (ice) sublimating - MS_SNOWPACK_RUNOFF,///< The total mass loss of snowpack due to water transport (virtual lysimeter) + MS_SNOWPACK_RUNOFF,///< The mass loss of snowpack from snow melt due to water transport (virtual lysimeter) + MS_SURFACE_MASS_FLUX, ///< The total mass loss of snowpack due to water transport (virtual lysimeter) MS_SOIL_RUNOFF, ///< Equivalent to MS_SNOWPACK_RUNOFF but at bottom soil node + MS_FLOODING, ///< The mass gain due to adding ocean water to snow- seaice by flooding process + MS_ICEBASE_MELTING_FREEZING, ///< The mass gain/loss of the ice base due to melting-freezing N_MASS_CHANGES ///< Total number of different mass change types }; const std::string toString() const; - friend std::iostream& operator<<(std::iostream& os, const SurfaceFluxes& data); - friend std::iostream& operator>>(std::iostream& is, SurfaceFluxes& data); + friend std::ostream& operator<<(std::ostream& os, const SurfaceFluxes& data); + friend std::istream& operator>>(std::istream& is, SurfaceFluxes& data); 
SurfaceFluxes(); @@ -649,6 +807,7 @@ class SurfaceFluxes { //@} /// @brief Defines structure for snow profile layers +//HACK: could it be moved to plugins? (as well as Aggregate) class SnowProfileLayer { public: SnowProfileLayer(); @@ -695,158 +854,16 @@ class RunInfo { RunInfo& operator=(const RunInfo&) {return *this;} //everything is static, so we can not change anything const std::string version; ///< SNOWPACK version + const double version_num; ///< SNOWPACK version formatted as a number const mio::Date computation_date; ///< Date of computation const std::string compilation_date; ///< Date of compilation const std::string user; ///< logname of the user running the simulation + const std::string hostname; ///< hostname of the computer running the simulation private: + static double getNumericVersion(std::string version_str); static mio::Date getRunDate(); static std::string getCompilationDate(); }; -/// Structure of double values for output to SDB -struct ProcessDat { - ProcessDat() : date(), nHz(0), stat_abbrev(), loc_for_snow(0), loc_for_wind(0), - ch(0.), swe(0.), tot_lwc(0.), runoff(0.), dewpt_def(0.), hoar_size(0.), hoar_ind6(0.), hoar_ind24(0.), - wind_trans(0.), wind_trans24(0.), - hn_half_hour(0.), hn3(0.), hn6(0.), hn12(0.), hn24(0.), hn72(0.), hn72_24(0.), - psum_half_hour(0.), psum3(0.), psum6(0.), psum12(0.), psum24(0.), psum72(0.), - stab_class1(0), stab_class2(0), - stab_index1(0.), stab_height1(0.), stab_index2(0.), stab_height2(0.), stab_index3(0.), stab_height3(0.), stab_index4(0.),stab_height4(0.), stab_index5(0.), stab_height5(0.), - crust(0.), en_bal(0.), sw_net(0.), t_top1(0.), t_top2(0.), lwi_N(0.), lwi_S(0.), - dhs_corr(0.), mass_corr(0.) 
- {} - - mio::Date date; ///< Process date - unsigned int nHz; ///< Number of hazard steps - std::string stat_abbrev; - unsigned char loc_for_snow; - unsigned char loc_for_wind; - // Data - double ch; ///< height of snow HS (cm) - double swe; ///< snow water equivalent SWE (kg m-2) - double tot_lwc; ///< total liquid water content (kg m-2) - double runoff; ///< runoff (kg m-2) - double dewpt_def; ///< dew point deficit (degC) - double hoar_size; ///< 24 h surface hoar size (mm) - double hoar_ind6; ///< 6 h surface hoar index (kg m-2) - double hoar_ind24; ///< 24 h surface hoar index (kg m-2) - double wind_trans; ///< 6 h drifting snow index (cm) - double wind_trans24; ///< 24 h drifting snow index (cm) - double hn_half_hour; ///< half_hour depth of snowfall (cm) - double hn3; ///< 3 h depth of snowfall (cm) - double hn6; ///< 6 h depth of snowfall (cm) - double hn12; ///< 12 h depth of snowfall (cm) - double hn24; ///< 24 depth of snowfall (cm) - double hn72; ///< 72 depth of snowfall (cm) - double hn72_24; ///< 3 d sum of 24 h depth of snowfall (cm) - double psum_half_hour; ///< half_hour new snow water equivalent (kg m-2) - double psum3; ///< 3 h new snow water equivalent (kg m-2) - double psum6; ///< 6 h new snow water equivalent (kg m-2) - double psum12; ///< 12 h new snow water equivalent (kg m-2) - double psum24; ///< 24 h new snow water equivalent (kg m-2) - double psum72; ///< 72 h new snow water equivalent (kg m-2) - signed char stab_class1; ///< stability classes 1,3,5 - signed char stab_class2; ///< profile type 0..10 - double stab_index1; ///< deformation index Sdef - double stab_height1; ///< depth of stab_index1 (cm) - double stab_index2; ///< natural stability index Sn38 - double stab_height2; ///< depth of stab_index2 (cm) - double stab_index3; ///< skier stability index Sk38 - double stab_height3; ///< depth of stab_index3 (cm) - double stab_index4; ///< structural stability index SSI - double stab_height4; ///< depth of stab_index4 (cm) - double 
stab_index5; ///< none - double stab_height5; ///< depth of stab_index5 (cm) - // Special parameters - double crust; ///< height of melt-freeze crust on southern slope (cm) - double en_bal; ///< internal energy change (kJ m-2) - double sw_net; ///< surface energy input (kJ m-2) - double t_top1, t_top2; ///< snow temperatures at depth 1 & 2, respectively (degC) - double lwi_N, lwi_S; ///< liquid water index for northerly and southerly slopes, respectively. - // Control parameters - double dhs_corr; ///< snow depth correction in case of squezzing or blow-up (cm) - double mass_corr; ///< mass correction from either forced erosion and squeezing (neg) or blowing up (pos) (cm) -}; - -struct ProcessInd { - ProcessInd() : stat_abbrev(true), loc_for_snow(true), loc_for_wind(true), - ch(true), swe(true), tot_lwc(true), runoff(true), dewpt_def(true), - hoar_size(true), hoar_ind6(true), hoar_ind24(true), - wind_trans(true), wind_trans24(true), - hn3(true), hn6(true), hn12(true), hn24(true), hn72(true), hn72_24(true), psum3(true), psum6(true), psum12(true), psum24(true), psum72(true), - stab_class1(true), stab_class2(true), - stab_index1(true), stab_height1(true), stab_index2(true), stab_height2(true), stab_index3(true), stab_height3(true), stab_index4(true), stab_height4(true), stab_index5(true), stab_height5(true), - crust(true), en_bal(true), sw_net(true), t_top1(true), t_top2(true), lwi_N(true), lwi_S(true) - {} - - bool stat_abbrev; - bool loc_for_snow; - bool loc_for_wind; - // Data - bool ch; - bool swe; - bool tot_lwc; - bool runoff; - bool dewpt_def; - bool hoar_size; - bool hoar_ind6, hoar_ind24; - bool wind_trans, wind_trans24; - bool hn3, hn6, hn12, hn24, hn72; - bool hn72_24; - bool psum3, psum6, psum12, psum24, psum72; - bool stab_class1, stab_class2; - bool stab_index1, stab_height1; - bool stab_index2, stab_height2; - bool stab_index3, stab_height3; - bool stab_index4, stab_height4; - bool stab_index5, stab_height5; - bool crust; - bool en_bal; - bool sw_net; - 
bool t_top1, t_top2; - bool lwi_N, lwi_S; -}; - -/// @brief Class for recording reference properties of tagged elements -class Tag { - public: - Tag(); - - void compute_properties(const ElementData& Edata); - void reposition_tag(const bool& useSoilLayers, const double& z, SnowStation& Xdata); - - static const bool metamo_expl; ///< set while using the explicit metamorphism model - - std::string label; ///< Label for output file header - mio::Date date; ///< date at which to start tagging - //char label[MAX_STRING_LENGTH]; ///< Label for output file header - //double JulianDate; ///< Julian date at which to start tagging - - // Repositioning - size_t elem; ///< Index of tagged element - double previous_depth; ///< Last position of corresponding fixed rate sensor perpendicular to slope (m) - // Viscosity - double etaNS; ///< New snow viscosity according to M. Lehning - double etaMSU; ///< Snow viscosity (Montana model) - // Metamorphism - double ML2L; ///< layer to layer flux - double lp; ///< lattice constant -}; - -class TaggingData { - public: - TaggingData(const double& i_calculation_step_length); - void resize(size_t i_size); - void update_tags(const CurrentMeteo& Mdata, SnowStation& Xdata); - - bool useSoilLayers, surface_write; - double calculation_step_length; - size_t tag_low, tag_top, repos_low, repos_top; - std::vector tags; - - private: - size_t number_tags; -}; - #endif diff --git a/third_party/snowpack/DoxygenLayout.xml b/third_party/snowpack/DoxygenLayout.xml index 93eac9cd..0b76e1c4 100644 --- a/third_party/snowpack/DoxygenLayout.xml +++ b/third_party/snowpack/DoxygenLayout.xml @@ -1,25 +1,3 @@ - diff --git a/third_party/snowpack/Hazard.cc b/third_party/snowpack/Hazard.cc index 737c36aa..dd4ba3a8 100644 --- a/third_party/snowpack/Hazard.cc +++ b/third_party/snowpack/Hazard.cc @@ -112,6 +112,8 @@ void Hazard::actOnVector(std::vector& oldVector, const double& newValue, break; case noAction: break; + default: + InvalidArgumentException("Unknown action 
provided to actOnVector", AT); } } @@ -344,7 +346,7 @@ void Hazard::getHazardDataMainStation(ProcessDat& Hdata, ProcessInd& Hdata_ind, int e = (signed)nE-1; for (unsigned int kk = 0; kk <= 5; kk++) { while ((e >= signed(Xdata.SoilNode)) && ((Mdata.date.getJulian() - EMS[e].depositionDate.getJulian()) < (H_TO_D(t_hn[kk])))) { - sum_hn += EMS[e].L; + sum_hn += EMS[e].L; sum_precip += EMS[e].L * EMS[e].Rho; e--; } diff --git a/third_party/snowpack/Hazard.h b/third_party/snowpack/Hazard.h index c6551ec3..866ef392 100644 --- a/third_party/snowpack/Hazard.h +++ b/third_party/snowpack/Hazard.h @@ -29,10 +29,114 @@ #ifndef HAZARD_H #define HAZARD_H -#include "DataClasses.h" #include +#include "DataClasses.h" #include +/// Structure of double values for output to SDB +struct ProcessDat { + ProcessDat() : date(), nHz(0), stat_abbrev(), loc_for_snow(0), loc_for_wind(0), + ch(0.), swe(0.), tot_lwc(0.), runoff(0.), dewpt_def(0.), hoar_size(0.), hoar_ind6(0.), hoar_ind24(0.), + wind_trans(0.), wind_trans24(0.), + hn_half_hour(0.), hn3(0.), hn6(0.), hn12(0.), hn24(0.), hn72(0.), hn72_24(0.), + psum_half_hour(0.), psum3(0.), psum6(0.), psum12(0.), psum24(0.), psum72(0.), + stab_class1(0), stab_class2(0), + stab_index1(0.), stab_height1(0.), stab_index2(0.), stab_height2(0.), stab_index3(0.), stab_height3(0.), stab_index4(0.),stab_height4(0.), stab_index5(0.), stab_height5(0.), + crust(0.), en_bal(0.), sw_net(0.), t_top1(0.), t_top2(0.), lwi_N(0.), lwi_S(0.), + dhs_corr(0.), mass_corr(0.) 
+ {} + + mio::Date date; ///< Process date + unsigned int nHz; ///< Number of hazard steps + std::string stat_abbrev; + unsigned char loc_for_snow; + unsigned char loc_for_wind; + // Data + double ch; ///< height of snow HS (cm) + double swe; ///< snow water equivalent SWE (kg m-2) + double tot_lwc; ///< total liquid water content (kg m-2) + double runoff; ///< runoff (kg m-2) + double dewpt_def; ///< dew point deficit (degC) + double hoar_size; ///< 24 h surface hoar size (mm) + double hoar_ind6; ///< 6 h surface hoar index (kg m-2) + double hoar_ind24; ///< 24 h surface hoar index (kg m-2) + double wind_trans; ///< 6 h drifting snow index (cm) + double wind_trans24; ///< 24 h drifting snow index (cm) + double hn_half_hour; ///< half_hour depth of snowfall (cm) + double hn3; ///< 3 h depth of snowfall (cm) + double hn6; ///< 6 h depth of snowfall (cm) + double hn12; ///< 12 h depth of snowfall (cm) + double hn24; ///< 24 depth of snowfall (cm) + double hn72; ///< 72 depth of snowfall (cm) + double hn72_24; ///< 3 d sum of 24 h depth of snowfall (cm) + double psum_half_hour; ///< half_hour new snow water equivalent (kg m-2) + double psum3; ///< 3 h new snow water equivalent (kg m-2) + double psum6; ///< 6 h new snow water equivalent (kg m-2) + double psum12; ///< 12 h new snow water equivalent (kg m-2) + double psum24; ///< 24 h new snow water equivalent (kg m-2) + double psum72; ///< 72 h new snow water equivalent (kg m-2) + signed char stab_class1; ///< stability classes 1,3,5 + signed char stab_class2; ///< profile type 0..10 + double stab_index1; ///< deformation index Sdef + double stab_height1; ///< depth of stab_index1 (cm) + double stab_index2; ///< natural stability index Sn38 + double stab_height2; ///< depth of stab_index2 (cm) + double stab_index3; ///< skier stability index Sk38 + double stab_height3; ///< depth of stab_index3 (cm) + double stab_index4; ///< structural stability index SSI + double stab_height4; ///< depth of stab_index4 (cm) + double 
stab_index5; ///< none + double stab_height5; ///< depth of stab_index5 (cm) + // Special parameters + double crust; ///< height of melt-freeze crust on southern slope (cm) + double en_bal; ///< internal energy change (kJ m-2) + double sw_net; ///< surface energy input (kJ m-2) + double t_top1, t_top2; ///< snow temperatures at depth 1 & 2, respectively (degC) + double lwi_N, lwi_S; ///< liquid water index for northerly and southerly slopes, respectively. + // Control parameters + double dhs_corr; ///< snow depth correction in case of squezzing or blow-up (cm) + double mass_corr; ///< mass correction from either forced erosion and squeezing (neg) or blowing up (pos) (cm) +}; + +struct ProcessInd { + ProcessInd() : stat_abbrev(true), loc_for_snow(true), loc_for_wind(true), + ch(true), swe(true), tot_lwc(true), runoff(true), dewpt_def(true), + hoar_size(true), hoar_ind6(true), hoar_ind24(true), + wind_trans(true), wind_trans24(true), + hn3(true), hn6(true), hn12(true), hn24(true), hn72(true), hn72_24(true), psum3(true), psum6(true), psum12(true), psum24(true), psum72(true), + stab_class1(true), stab_class2(true), + stab_index1(true), stab_height1(true), stab_index2(true), stab_height2(true), stab_index3(true), stab_height3(true), stab_index4(true), stab_height4(true), stab_index5(true), stab_height5(true), + crust(true), en_bal(true), sw_net(true), t_top1(true), t_top2(true), lwi_N(true), lwi_S(true) + {} + + bool stat_abbrev; + bool loc_for_snow; + bool loc_for_wind; + // Data + bool ch; + bool swe; + bool tot_lwc; + bool runoff; + bool dewpt_def; + bool hoar_size; + bool hoar_ind6, hoar_ind24; + bool wind_trans, wind_trans24; + bool hn3, hn6, hn12, hn24, hn72; + bool hn72_24; + bool psum3, psum6, psum12, psum24, psum72; + bool stab_class1, stab_class2; + bool stab_index1, stab_height1; + bool stab_index2, stab_height2; + bool stab_index3, stab_height3; + bool stab_index4, stab_height4; + bool stab_index5, stab_height5; + bool crust; + bool en_bal; + bool sw_net; + 
bool t_top1, t_top2; + bool lwi_N, lwi_S; +}; + /** @brief * * @ingroup postprocessing diff --git a/third_party/snowpack/Laws_sn.cc b/third_party/snowpack/Laws_sn.cc index 90f3e13b..b230140e 100644 --- a/third_party/snowpack/Laws_sn.cc +++ b/third_party/snowpack/Laws_sn.cc @@ -37,10 +37,9 @@ */ #include "Laws_sn.h" -#include "Constants.h" #include "Utils.h" +#include "Constants.h" #include "snowpackCore/Metamorphism.h" -#include "snowpackCore/Snowpack.h" //some physics are necessary using namespace std; using namespace mio; @@ -64,18 +63,6 @@ std::vector SnLaws::swa_pc; ///< fraction of sun power spectrum per band std::vector SnLaws::swa_fb; ///< fudge_bohren //@} -/** - * @name SOIL PARAMETERS - * - * @brief Define Method and Coefficents for the computation of the influence of soil water - * content on Evaporation from Bare Soil Layers: - * - Resistance Approach, see Laws_sn.c - * - Relative Humidity Approach, see Snowpack.cc - * - none, assume saturation pressure and no extra resistance - */ -//@{ -const SnLaws::soil_evap_model SnLaws::soil_evaporation = EVAP_RESISTANCE; - /// @brief Minimum soil surface resistance, 50 sm-1 (van den Hurk et al, 2000) const double SnLaws::rsoilmin = 50.0; @@ -193,13 +180,20 @@ bool SnLaws::setStaticData(const std::string& variant, const std::string& watert { current_variant = variant; - if (current_variant == "ANTARCTICA") { + if (current_variant == "ANTARCTICA" || current_variant == "POLAR") { t_term = t_term_arrhenius_critical; visc = visc_dflt; visc_ice_fudge = 9.45; visc_sp_fudge = 16.5; //visc_water_fudge is set to zero by default setfix = false; + if (watertransport_model == "RICHARDSEQUATION" ) { + // No sophisticated calibration was performed to find this value. The only issue is that a different water transport scheme leads to different settling behaviour. + // This value is chosen such that the melt curves in spring more or less resemble those of simulations with BUCKET. 
+ visc_water_fudge = 45.; + } else { + visc_water_fudge = 33.; + } event = event_wind; event_wind_lowlim = 4.0; event_wind_highlim = 7.0; @@ -249,7 +243,7 @@ bool SnLaws::setStaticData(const std::string& variant, const std::string& watert /** * @name THERMAL CONDUCTIVITY OF ICE - * @brief Based on master thesis of Tobias Hipp, who used relationships by Ling & Yhang (2005). + * @brief Based on master thesis of Tobias Hipp, who used relationships by Ling & Zhang (2004). * @version 11.03 * @param Temperature Temperature (K) * @return Thermal conductivity of ice @@ -262,7 +256,7 @@ double SnLaws::conductivity_ice(const double& Temperature) /** * @name THERMAL CONDUCTIVITY OF WATER - * @brief Based on master thesis of Tobias Hipp, who used relationships by Ling & Yhang (2005). + * @brief Based on master thesis of Tobias Hipp, who used relationships by Ling & Zhang (2004). * @version 11.03 * @param Temperature Temperature (K) * @return Thermal conductivity of water @@ -297,13 +291,12 @@ double SnLaws::conductivity_water(const double& Temperature) * @param Tss Snow surface temperature (K) * @param Mdata */ -double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const std::string& i_albedo_parameterization, const std::string& i_albAverageSchmucki, +double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const std::string& i_albedo_parameterization, const std::string& i_albAverageSchmucki, const double& i_albNIED_av, const double& i_albedo_fixedValue, const ElementData& Edata, const double& Tss, const CurrentMeteo& Mdata, const bool& ageAlbedo) { double Alb = Constants::min_albedo; const double Ta = Mdata.ta; double age = (ageAlbedo)? 
Mdata.date.getJulian() - Edata.depositionDate.getJulian() : 0.; - if (i_snow_albedo == "FIXED") { Alb = i_albedo_fixedValue; } else if ((ageAlbedo && (age > 365.)) || (Edata.mk % 10 == 7)) { @@ -321,7 +314,7 @@ double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const s + g*sqrt_age*Optim::pow2(Ta) + h*sqrt_age*Tss + i*sqrt_age*lwi + j*Optim::pow2(Ta)*Tss + k*Optim::pow2(Ta)*lwi + l*Tss*Mdata.rswr + m*Tss*lwi + n*Tss*Mdata.rh + o*Mdata.vw*Mdata.rh); - Alb = weight * Edata.dd * Snowpack::new_snow_albedo + (1. - weight * Edata.dd) * Alb1; + Alb = weight * Edata.dd * Constants::max_albedo + (1. - weight * Edata.dd) * Alb1; } else if (i_albedo_parameterization == "LEHNING_1") { double mf = 0.; @@ -334,10 +327,11 @@ double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const s mf = 1.; // av *= exp(-age/1700.); } - const double Alb1 = Crho*Edata.Rho + Clwc*Edata.theta[WATER] + Cdd*Edata.dd + Csp*Edata.sp + const double Alb1 = Crho*Edata.Rho + Clwc*(Edata.theta[WATER]+Edata.theta[WATER_PREF]) + Cdd*Edata.dd + Csp*Edata.sp + Cmf*mf + Crb*Edata.rb + Cta*Ta + Ctss*Tss + Cv*Mdata.vw+ Cswout*Mdata.rswr + Cta_tss*Ta*Tss; - Alb = av + log(1.0 + Alb1); + if (Alb1 >= -1.) Alb = av + log(1.0 + Alb1); + Alb = std::max(Constants::min_albedo, Alb); } else if (i_albedo_parameterization == "LEHNING_2") { //TODO: this perfoms very badly (if not completly wrong) for (very?) 
wet snowpack @@ -353,7 +347,7 @@ double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const s static const double Cage = -0.000575, Cta = -0.006, Cv = 0.00762, Clwc = -0.2735; static const double Crho = -0.000056, Crh = 0.0333, Crb = -0.301, Crg = 0.175; static const double Cdd = 0.064, Csp = -0.0736, Ctss = 0.00459, Cswout = -0.000101; - const double Alb1 = inter + Cage*age + Crho*Edata.Rho + Clwc*Edata.theta[WATER] + const double Alb1 = inter + Cage*age + Crho*Edata.Rho + Clwc*(Edata.theta[WATER]+Edata.theta[WATER_PREF]) + Cdd*Edata.dd + Csp*Edata.sp + Crg*Edata.rg + Crb*Edata.rb + Cta*Ta + Ctss*Tss + Cv*Mdata.vw + Cswout*Mdata.rswr + Crh*Mdata.rh; @@ -375,7 +369,7 @@ double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const s static const double inter = 1.178904; static const double Cms = -5.691804e-02, Cage = -2.840603e-04, Crg = -1.029158e-01, Crho = -5.030213e-04, Cswin = -6.780479e-5; - const double moist_snow = (Edata.theta[WATER] > SnowStation::thresh_moist_snow)? 1. : 0.; + const double moist_snow = ((Edata.theta[WATER]+Edata.theta[WATER_PREF]) > SnowStation::thresh_moist_snow)? 1. : 0.; Alb1 = inter + Cms*moist_snow + Cage*age + Crg*(Edata.rg) + Crho*Edata.Rho + Cswin*Mdata.iswr; if (Alb1 > 0.) { @@ -396,7 +390,7 @@ double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const s static const double inter = 1.148088; static const double Cms = -4.412422e-02, Cage = -1.523871e-03, Cogs = -1.099020e-01, Crho = -3.638010e-04, Cswin = -7.140708e-05; - const double moist_snow = (Edata.theta[WATER] > SnowStation::thresh_moist_snow)? 1. : 0.; + const double moist_snow = ((Edata.theta[WATER]+Edata.theta[WATER_PREF]) > SnowStation::thresh_moist_snow)? 1. : 0.; Alb1 = inter + Cms*moist_snow + Cage*age + Cogs*(Edata.ogs/2.) + Crho*Edata.Rho + Cswin*Mdata.iswr; if (Alb1 > 0.) 
{ @@ -407,13 +401,13 @@ double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const s } } else if (i_albedo_parameterization == "NIED") { // by H. Hirashima (NIED, Nagaoka, Japan) - static const double av = 0.75; - static const double inter = 1.005; - static const double Cage = -0.00016*10.0, Cta = -0.000249*2.0, Cv = 0.00578, Clwc = -2.15; - static const double Crho = -0.000047, Crh = 0.129, Crb = -0.306, Crg = 0.107; - static const double Cdd = 0.076, Csp = 0.00964, Ctss = -0.000166, Cswout = -1.8e-5; + const double av = i_albNIED_av; + const double inter = 1.005; + const double Cage = -0.00016*10.0, Cta = -0.000249*2.0, Cv = 0.00578, Clwc = -2.15; + const double Crho = -0.000047, Crh = 0.129, Crb = -0.306, Crg = 0.107; + const double Cdd = 0.076, Csp = 0.00964, Ctss = -0.000166, Cswout = -1.8e-5; - const double Alb1 = inter + Crho*Edata.Rho + Clwc*Edata.theta[WATER] + Cdd*Edata.dd + Csp*Edata.sp + const double Alb1 = inter + Crho*Edata.Rho + Clwc*(Edata.theta[WATER]+Edata.theta[WATER_PREF]) + Cdd*Edata.dd + Csp*Edata.sp + Crg*Edata.rg + Crb*Edata.rb + Cta*Ta + Ctss*Tss + Cv*Mdata.vw + Cswout*Mdata.rswr + Crh*Mdata.rh + Cage*age; @@ -428,7 +422,6 @@ double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const s prn_msg(__FILE__, __LINE__, "err", Date(), "Albedo parameterization %s not implemented yet!", i_albedo_parameterization.c_str()); throw IOException("The required snow albedo model is not implemented yet!", AT); } - return(Alb); } @@ -441,12 +434,12 @@ double SnLaws::parameterizedSnowAlbedo(const std::string& i_snow_albedo, const s */ void SnLaws::compShortWaveAbsorption(const std::string& i_sw_absorption_scheme, SnowStation& Xdata, const double& I0) { - ElementData *EMS = &Xdata.Edata[0]; const size_t nE = Xdata.getNumberOfElements(); if (nE==0) return; + ElementData *EMS = &Xdata.Edata[0]; const size_t bottom_element = (Xdata.SoilNode > 0)? 
Xdata.SoilNode - 1 : Xdata.SoilNode; - for (size_t e = bottom_element; e < nE; e++) + for (size_t e = 0; e < nE; e++) EMS[e].sw_abs = 0.; // Compute absorbed radiation @@ -493,6 +486,8 @@ void SnLaws::compShortWaveAbsorption(const std::string& i_sw_absorption_scheme, */ void SnLaws::compAdvectiveHeat(SnowStation& Xdata, const double& advective_heat, const double& depth_begin, const double& depth_end) { + if (Xdata.getNumberOfElements() == 0) return; + ElementData *EMS = &Xdata.Edata[0]; NodeData *NDS = &Xdata.Ndata[0]; @@ -554,7 +549,7 @@ double SnLaws::compWindPumpingVelocity(const CurrentMeteo& Mdata, const double& */ double SnLaws::compWindGradientSnow(const ElementData& Edata, double& v_pump) { - const double v_EXt = SnLaws::wind_ext_coef * (Edata.Rho + 2e4 * Edata.theta[WATER]); + const double v_EXt = SnLaws::wind_ext_coef * (Edata.Rho + 2.e4 * (Edata.theta[WATER]+Edata.theta[WATER_PREF])); const double dv = v_pump * (1. - exp(-v_EXt * (Edata.L))); v_pump -= dv; @@ -567,33 +562,36 @@ double SnLaws::compWindGradientSnow(const ElementData& Edata, double& v_pump) * Kersten in "Geotechnical Engeneering for Cold Regions" article by Harlan and Nixon, * the water influence deduced from deVries and Afgan in "Heat and Mass Transfer in the Biosphere". * @version 11.03: thermal conductivity made temperature dependent. 
- * @param Edata - * @param dvdz Wind velocity gradient (s-1) + * @version 12.0: thermal conductivity model is now defined by a key SOIL_THERMAL_CONDUCTIVITY in SNOWPACK_ADVANCED + * @param[in] Edata + * @param[in] dvdz Wind velocity gradient (s-1) + * @param[in] soil_thermal_conductivity Thermal conductivity model to use (either "FITTED" or any other string) * @return Soil thermal conductivity (W K-1 m-1) */ -double SnLaws::compSoilThermalConductivity(const ElementData& Edata, const double& dvdz) +double SnLaws::compSoilThermalConductivity(const ElementData& Edata, const double& dvdz, + const std::string& soil_thermal_conductivity) { double C_eff_soil; //0 means no soil, 10000 means rock - if ((Edata.rg > 0.) && (Edata.rg < 10000.)) { + if ((Edata.rg > 0.) && (Edata.rg < 10000.) && soil_thermal_conductivity == "FITTED") { static const double c_clay = 1.3, c_sand = 0.27; static const double beta1 = 6., beta2 = 4.978, c_mineral = 2.9; const double weight = (c_clay - Edata.soil[SOIL_K]) / (c_clay - c_sand); - const double C_eff_soil_max = Edata.theta[SOIL] * c_mineral + Edata.theta[WATER] + const double C_eff_soil_max = Edata.theta[SOIL] * c_mineral + (Edata.theta[WATER]+Edata.theta[WATER_PREF]) * SnLaws::conductivity_water(Edata.Te) + Edata.theta[ICE] * SnLaws::conductivity_ice(Edata.Te); C_eff_soil = (beta1 + weight * beta2) * Edata.theta[ICE]; - if (Edata.theta[WATER] > SnowStation::thresh_moist_soil) { + if ((Edata.theta[WATER]+Edata.theta[WATER_PREF]) > SnowStation::thresh_moist_soil) { static const double alpha1 = 0.389, alpha2 = 0.3567, alpha3 = 61.61; - C_eff_soil += std::max( 0.27, (alpha1 + alpha2 * weight) * log(alpha3 * Edata.theta[WATER]) ); + C_eff_soil += std::max( 0.27, (alpha1 + alpha2 * weight) * log(alpha3 * (Edata.theta[WATER]+Edata.theta[WATER_PREF])) ); } else { C_eff_soil += 0.27; } C_eff_soil = std::min(C_eff_soil_max, C_eff_soil); } else { - C_eff_soil = Edata.soil[SOIL_K] + Edata.theta[WATER] * SnLaws::conductivity_water(Edata.Te) + 
C_eff_soil = Edata.soil[SOIL_K] + (Edata.theta[WATER]+Edata.theta[WATER_PREF]) * SnLaws::conductivity_water(Edata.Te) + Edata.theta[ICE] * SnLaws::conductivity_ice(Edata.Te); } @@ -609,6 +607,98 @@ double SnLaws::compSoilThermalConductivity(const ElementData& Edata, const doubl return(C_eff_soil); } +/** + * @brief Water vapor diffusion coefficient in soil. + * The formulation is based on Saito et al., 2006 "Numerical analysis of coupled water vapor + * and heat transport in the vadose zone", see eq. [14] and [15]. + * It is defined as the product of the tortuosity factor (-) as defined by Millington and Quirck (1961), + * the air-filled porosity (m3 m-3) and the diffusivity of water vapor in air (m2 s-1). + * @author Margaux Couttet + * @param Edata element data + * @return vapour diffusivity in soil (m2 s-1) + */ +double SnLaws::soilVaporDiffusivity(const ElementData& Edata) +{ + double tortuosity = (Edata.VG.theta_s > Constants::eps2)?(pow(Edata.theta[AIR], 7./3.)/pow(Edata.VG.theta_s, 2.)):(0.); + + return (tortuosity * Edata.theta[AIR] * Constants::diffusion_coefficient_in_air); +} + + +/** + * @brief Computes the enhancement factor for water vapor transport in soil. + * Derived from Cass et al., 1984 "Enhancement of thermal water vapor diffusion in soil", see eq. [19]. + * Describe the increase in thermal vapor flux as a result of liquid islands and increased + * temperature gradients in the air phase. 
+ * @author Margaux Couttet + * @param Edata element data + * @param clay_fraction fraction of clay in the soil + * @return Enhancement factor (-) + */ +double SnLaws::compEnhanceWaterVaporTransportSoil(const ElementData& Edata, const double& clay_fraction) +{ + const double r = (Edata.theta[WATER]+Edata.theta[ICE]*Constants::density_ice/Constants::density_water)/Edata.VG.theta_s; + return ((Edata.VG.theta_s > Constants::eps2)?(9.5 + 3.*(r) - 8.5*exp(-1.*pow((1.+2.6/sqrt(clay_fraction))*r,4.))):(0.)); +} + +/** +* @brief Computes the soil THERMAL vapor hydraulic conductivity. +* Requires the use of RE to determine pressure head (Edata.h) and saturated water content (theta_s). +* The THERMAL vapor hydraulic conductivy formulation is based on Saito et al., 2006 +* "Numerical analysis of coupled water, vapor, and heat transport in the vadose zone", see eq. [13]. +* It is used to determine the flux density of water vapor in soil due to THERMAL gradient: q_vT = -Kvapor_T*gradT. +* The enhancement factor is used to describe the increase in the thermal vapor flux as a result of liquid islands +* and increased temperature gradients in the air phase (Philip and de Vries, 1957) +* The relative humidity is calculated from the pressure head (h), using a thermodynamic relationship between liquid water +* and water vapour in soil pores (Philip and de Vries, 1957) +* @author Margaux Couttet +* @param Edata_bot element data +* @param Edata_top element data +* @param Te_bot lower element temperature (K) +* @param Te_top upper element temperature (K) +* @param clay_fraction fraction of clay in the soil +* @return thermal vapor hydraulic conductivity (m2 K-1 s-1) +*/ +double SnLaws::compSoilThermalVaporConductivity(const ElementData& Edata_bot, const ElementData& Edata_top, const double& Te_bot, const double& Te_top, const double& clay_fraction) +{ + //Determine the nodal values by averaging between top and bottom elements + const double nodal_diffusivity = .5 * 
(SnLaws::soilVaporDiffusivity(Edata_top) + SnLaws::soilVaporDiffusivity(Edata_bot)); //(m2 s-1) + const double nodal_HR = .5 * (Edata_top.RelativeHumidity() + Edata_bot.RelativeHumidity()); //(-) + const double nodal_enhancement = .5 * (SnLaws::compEnhanceWaterVaporTransportSoil(Edata_top,clay_fraction) + + SnLaws::compEnhanceWaterVaporTransportSoil(Edata_bot,clay_fraction)); // (-) + + double dRhovs_dT = 0.; // change of water vapor density due to temperature gradient (kg m-3 K-1) + if (fabs(Te_top - Te_bot) > Constants::eps2) { // if no temperature difference between top and bottom nodes, the vapor density gradient remains zero + dRhovs_dT = (Atmosphere::waterVaporDensity(Te_top, Atmosphere::vaporSaturationPressure(Te_top)) - + Atmosphere::waterVaporDensity(Te_bot, Atmosphere::vaporSaturationPressure(Te_bot))) / (Te_top - Te_bot); + } + return (nodal_diffusivity/Constants::density_water * nodal_enhancement * nodal_HR * dRhovs_dT); +} + +/** + * @brief Computes the soil ISOTHERMAL vapor hydraulic conductivity. + * The ISOTHERMAL vapor hydraulic conductivy formulation is based on Saito et al., 2006 + * "Numerical analysis of coupled water, vapor, and heat transport in the vadose zone", see eq. [12]. + * It is used to determine the flux density of water vapor in soil due to MOISTURE gradient: q_vh = -Kvapor_h*gradH. 
+ * @author Margaux Couttet + * @param Edata_bot element data + * @param Edata_top element data + * @param Te_bot lower element temperature (K) + * @param Te_top upper element temperature (K) + * @param T_node nodal temperature (K) + * @return isothermal vapor hydraulic conductivity (m s-1) + */ +double SnLaws::compSoilIsothermalVaporConductivity(const ElementData& Edata_bot, const ElementData& Edata_top, const double& Te_bot, const double& Te_top, const double& T_node) +{ + //Determine the nodal values by averaging between top and bottom elements + const double nodal_diffusivity = .5*(SnLaws::soilVaporDiffusivity(Edata_top) + SnLaws::soilVaporDiffusivity(Edata_bot)); //(m2 s-1) + const double nodal_vaporDensity = .5*(Atmosphere::waterVaporDensity(Te_top, Atmosphere::vaporSaturationPressure(Te_top)) + + Atmosphere::waterVaporDensity(Te_bot, Atmosphere::vaporSaturationPressure(Te_bot))); //(kg m-3) + const double nodal_HR = .5*(Edata_top.RelativeHumidity() + Edata_bot.RelativeHumidity()); //(-) + + return (nodal_diffusivity/Constants::density_water * nodal_vaporDensity * Constants::g/(Constants::gas_constant * T_node)) * nodal_HR; +} + /** * @brief Computes the enhancement factor for water vapor transport in wet snow * @version 9Y.mm @@ -668,7 +758,7 @@ double SnLaws::compSnowThermalConductivity(const ElementData& Edata, const doubl const double rg = MM_TO_M(Edata.rg); //Grain radius (m) const double rb = MM_TO_M(Edata.rb); //Bond radius (m) - const double Te = std::min(Edata.Te, Edata.melting_tk); //Element temperature (K) + const double Te = std::min(Edata.Te, Edata.meltfreeze_tk); //Element temperature (K) // Check for elements with no ice and assume they contain only water if (Edata.theta[ICE] < Snowpack::min_ice_content) @@ -697,7 +787,7 @@ double SnLaws::compSnowThermalConductivity(const ElementData& Edata, const doubl // Compute cross-sectional areas of conduction paths (m2) const double Ap = Metamorphism::csPoreArea(Edata); // (mm2) - const double Aiw = 
std::max(0., Edata.theta[WATER] * (1. / C1 - rg) / C1 * (Ap + Constants::pi * rg*rg)); + const double Aiw = std::max(0., Edata.theta[WATER] * (1. / C1)/(1. / C1 - rg) * (Ap + Constants::pi * rg*rg)); const double Aip = std::max(0., Constants::pi * (rg*rg - rb*rb) - Aiw); /* @@ -724,7 +814,7 @@ double SnLaws::compSnowThermalConductivity(const ElementData& Edata, const doubl const double C5 = (Constants::conductivity_ice * Constants::conductivity_water * Aiw) / (rg * Constants::conductivity_water + (1./C1 - rg) * Constants::conductivity_ice); - double C_eff = SnLaws::montana_c_fudge * C1 * (C2 + C3 + C4 + C5) * (2.0 - Edata.dd) * (1.0 + pow(Edata.theta[ICE], 1.7)) * (0.5 + Optim::pow2(Te/Edata.melting_tk) ); + double C_eff = SnLaws::montana_c_fudge * C1 * (C2 + C3 + C4 + C5) * (2.0 - Edata.dd) * (1.0 + pow(Edata.theta[ICE], 1.7)) * (0.5 + Optim::pow2(Te/Edata.meltfreeze_tk) ); if (!((C_eff < 5.*Constants::conductivity_ice) && (C_eff > 0.2*Constants::conductivity_air)) && show_warnings) { prn_msg(__FILE__, __LINE__, "wrn", Date(), "Conductivity out of range (0.2*Constants::conductivity_air=%.3lf, 5.*Constants::conductivity_ice=%.3lf):", 0.2 * Constants::conductivity_air, 5. * Constants::conductivity_ice); @@ -774,55 +864,55 @@ double SnLaws::compSensibleHeatCoefficient(const CurrentMeteo& Mdata, const Snow * ql = beta*(eA - eS) Latent heat transfer. eA and eS are the vapor * pressures of air and snow, respectively. 
* @version 9Y.mm + * @param soil_evaporation The evaporation method to be used * @param Mdata * @param Xdata * @param height_of_meteo_values Height at which meteo parameters are measured * @return Latent heat flux (W m-2) */ -double SnLaws::compLatentHeat_Rh(const CurrentMeteo& Mdata, SnowStation& Xdata, const double& height_of_meteo_values) +double SnLaws::compLatentHeat_Rh(const std::string soil_evaporation, + const CurrentMeteo& Mdata, SnowStation& Xdata, const double& height_of_meteo_values) { const size_t nElems = Xdata.getNumberOfElements(); const double T_air = Mdata.ta; const double Tss = Xdata.Ndata[nElems].T; + const double Tse = (nElems > 0) ? (Xdata.Edata[nElems-1].Te) : Constants::meltfreeze_tk; double eS; // Vapor Pressures - const double th_w_ss = (nElems>0)? Xdata.Edata[nElems-1].theta[WATER] : 0.; - // TODO The part below needs to be rewritten in a more consistent way !!! // In particular, look closely at the condition within compLatentHeat() const double eA = Mdata.rh * Atmosphere::vaporSaturationPressure(T_air); const double Vp1 = Atmosphere::vaporSaturationPressure(Tss); - const double Vp2 = Atmosphere::vaporSaturationPressure(Tss); //HACK something got lost here... + const double Vp2 = Atmosphere::vaporSaturationPressure(Tse); // First, the case of no snow if (Xdata.getNumberOfNodes() == Xdata.SoilNode + 1 && nElems > 0) { - if ( Tss < Xdata.Edata[nElems-1].melting_tk) { + if ( Tss < Xdata.Edata[nElems-1].meltfreeze_tk) { eS = Vp1 ; } else { /* * Soil evaporation can now be computed using the Relative Humidity approach below, * or a Resistance approach modifying the ql value instead of the eS. 
The latter - * function is defined in compLatentHeat, and the Switch SnLaws::soil_evaporation is found - * in Laws_sn.h + * function is defined in compLatentHeat, and the soil_evaporation key is read + * in snowpackCore/Snowpack.h */ - if (SnLaws::soil_evaporation==EVAP_RELATIVE_HUMIDITY && th_w_ss < Xdata.Edata[Xdata.SoilNode-1].soilFieldCapacity()) { - eS = Vp2 * 0.5 * ( 1. - cos (std::min(Constants::pi, th_w_ss * Constants::pi - / (Xdata.Edata[Xdata.SoilNode-1].soilFieldCapacity() * 1.6)))); + if (soil_evaporation=="EVAP_RELATIVE_HUMIDITY") { + eS = Vp2 * Xdata.Edata[Xdata.SoilNode-1].RelativeHumidity(); } else { eS = Vp2; } } } else { // for snow assume saturation - const double melting_tk = (nElems>0)? Xdata.Edata[nElems-1].melting_tk : Constants::melting_tk; - if (Tss < melting_tk) + const double meltfreeze_tk = (nElems > 0) ? Xdata.Edata[nElems-1].meltfreeze_tk : Constants::meltfreeze_tk; + if (Tss < meltfreeze_tk) eS = Vp1; else eS = Vp2; } // Now the latent heat - const double beta = SnLaws::compLatentHeat(Mdata, Xdata, height_of_meteo_values); + const double beta = SnLaws::compLatentHeat(soil_evaporation, Mdata, Xdata, height_of_meteo_values); return (beta * (eA - eS)); } @@ -836,7 +926,7 @@ double SnLaws::compLatentHeat_Rh(const CurrentMeteo& Mdata, SnowStation& Xdata, * is used to reduce the heat exchange coefficient in the case of evaporation: * c = 1/(Ra + Rsoil), where Ra = 1/c as computed above, and * Rsoil = 50 [s/m] * field_capacity_soil / theta_soil. \n - * A new switch SnLaws::soil_evaporation is defined in Constants.h to select method. + * A key SNOWPACK_ADVANCED::soil_evaporation is defined to select method. * The resistance formulation originates from van den Hurk et al.(2000) "Offline validation * of the ERA40 surface scheme": ECMWF Tech.Memo 295. 
\n * A difference from the RH method is that the surface vapour pressure is always assumed @@ -847,25 +937,27 @@ double SnLaws::compLatentHeat_Rh(const CurrentMeteo& Mdata, SnowStation& Xdata, * method should work in a discretized model, it is important to consider the difference * between vapour pressure at the surface and the average of the top soil layer. \n * The soil resistance is only used for bare soil layers, when TSS >= 0C and eSurf >= eAtm + * @param[in] soil_evaporation The evaporation method to be used * @param[in] Mdata * @param[in] Xdata * @param[in] height_of_meteo_values Height at which meteo parameters are measured * @return Latent heat flux (W m-2) */ -double SnLaws::compLatentHeat(const CurrentMeteo& Mdata, SnowStation& Xdata, const double& height_of_meteo_values) +double SnLaws::compLatentHeat(const std::string soil_evaporation, const CurrentMeteo& Mdata, SnowStation& Xdata, const double& height_of_meteo_values) { const size_t nElems = Xdata.getNumberOfElements(); + const bool SurfSoil = (nElems > 0) ? (Xdata.Edata[nElems-1].theta[SOIL] > 0.) : false; double c = compSensibleHeatCoefficient(Mdata, Xdata, height_of_meteo_values); - if ((Xdata.getNumberOfNodes() == Xdata.SoilNode + 1) && (nElems > 0) - && (Xdata.Ndata[nElems].T >= Xdata.Edata[nElems-1].melting_tk) - && (SnLaws::soil_evaporation == EVAP_RESISTANCE)) { + if (SurfSoil && (Xdata.Ndata[nElems].T >= Xdata.Edata[nElems-1].meltfreeze_tk) + && (soil_evaporation == "EVAP_RESISTANCE")) { + const double Tse = (nElems > 0) ? (Xdata.Edata[nElems-1].Te) : Constants::meltfreeze_tk; const double eA = Mdata.rh * Atmosphere::vaporSaturationPressure( Mdata.ta ); - const double eS = Atmosphere::vaporSaturationPressure( Xdata.Ndata[nElems].T ); + const double eS = Atmosphere::vaporSaturationPressure( Tse ); if (eS >= eA) { c = 1. 
/ c + SnLaws::rsoilmin / std::max(SnLaws::relsatmin, std::min(1., - Xdata.Edata[nElems-1].theta[WATER] + (Xdata.Edata[nElems-1].theta[WATER]+Xdata.Edata[nElems-1].theta[WATER_PREF]) / Xdata.Edata[Xdata.SoilNode-1].soilFieldCapacity())); c = 1. / c; } @@ -896,8 +988,8 @@ double SnLaws::compLWRadCoefficient(const double& t_snow, const double& t_atm, c /** * @brief Event driven new-snow density - * @param i_event: - * - event_wind: rho = 250.3 kg m-3 @ 4 m s-1; rho = 338 kg m-3 @ 7 m s-1 Antarctica + * @param variant Snowpack variant (such as DEFAULT, POLAR...) + * @param i_event - event_wind: rho = 250.3 kg m-3 @ 4 m s-1; rho = 338 kg m-3 @ 7 m s-1 Antarctica * @param Mdata Meteorological input */ double SnLaws::newSnowDensityEvent(const std::string& variant, const SnLaws::EventType& i_event, @@ -929,7 +1021,7 @@ double SnLaws::newSnowDensityEvent(const std::string& variant, const SnLaws::Eve * @param RH Relative air humidity (1) * @param VW Mean wind velocity (m s-1) * @param HH Altitude a.s.l. (m) - * @param model Parameterization to be used + * @param i_hn_model Parameterization to be used * @return New snow density (kg m-3) */ double SnLaws::newSnowDensityPara(const std::string& i_hn_model, @@ -979,7 +1071,26 @@ double SnLaws::newSnowDensityPara(const std::string& i_hn_model, rho_hn = pow(10., arg); } else if (i_hn_model == "PAHAUT") { - rho_hn = 109. + 6.*(IOUtils::C_TO_K(TA) - Constants::melting_tk) + 26.*sqrt(VW); + rho_hn = 109. + 6.*(IOUtils::C_TO_K(TA) - Constants::meltfreeze_tk) + 26.*sqrt(VW); + + } else if (i_hn_model == "NIED") { + rho_hn = 62. + 3.6 * VW - 0.2 * TA; + + } else if (i_hn_model == "VANKAMPENHOUT") { + // van Kampenhout et al. (2017): https://doi.org/10.1002/2017MS000988 + // Eq. 4 in van Kampenhout et al. (2017): + const double rho_w = 266.861 * (pow((0.5 * (1. + tanh( VW / 5. ))), 8.8)); + double rho_t = 0.; + // Eq. 3 in van Kampenhout et al. (2017): + if (TA > 2.) { + rho_t = 50. + 1.7 * pow(17., 1.5); + } else if (TA > -15. 
&& TA <= 2.) { + rho_t = 50. + 1.7 * pow((TA + 15.), 1.5); + } else { + rho_t = -3.8328 * TA - 0.0333 * TA * TA; + } + // Eq. 2 in van Kampenhout et al. (2017): + rho_hn = rho_t + rho_w; } else { prn_msg(__FILE__, __LINE__, "err", Date(), @@ -1021,7 +1132,7 @@ double SnLaws::newSnowDensityHendrikx(const double ta, const double tss, const d * - event_wind: Implemented 2009 by Christine Groot Zwaaftink for Antarctic variant * - MEASURED: Use measured new snow density read from meteo input * -Note: Set HN_DENSITY_FIXEDVALUE to 1. to use surface snow density as a "measured" value in case of missing values - * - FIXED: Use a fixed new snow density by assigning HN_DENSITY-FIXEDVALUE a value (default: 100 kg m-3, at least min_hn_density) + * - FIXED: Use a fixed new snow density by assigning HN_DENSITY_FIXEDVALUE a value (default: 100 kg m-3, at least min_hn_density) * @param i_hn_density type of density computation * @param i_hn_density_parameterization to use * @param i_hn_density_fixedValue to use @@ -1055,9 +1166,11 @@ double SnLaws::compNewSnowDensity(const std::string& i_hn_density, const std::st } else { rho = Constants::undefined; } - } else { // "FIXED" + } else if (i_hn_density == "FIXED") { rho = (i_hn_density_fixedValue != Constants::undefined) ? i_hn_density_fixedValue : Xdata.Edata[Xdata.getNumberOfElements()-1].Rho; rho = std::max(min_hn_density, rho); + } else { + throw UnknownValueException("Unknown new snow density option (HN_DENSITY) selected!", AT); } return rho; @@ -1088,31 +1201,38 @@ double SnLaws::NewSnowViscosityLehning(const ElementData& Edata) /** * @brief Computes the temperature term of viscosity + * The modifications for POLAR variant are described in: Steger CR, Reijmer CH, van den Broeke MR, Wever N, + * Forster RR, Koenig LS, Kuipers Munneke P, Lehning M, Lhermitte S, Ligtenberg SRM, Miège C and Noël BPY (2017) + * Firn Meltwater Retention on the Greenland Ice Sheet: A Model Comparison. Front. Earth Sci. 5:3. 
+ * doi: 10.3389/feart.2017.00003: "To improve the agreement with observations, the tunable factors in the snow + * viscosity scheme (Groot Zwaaftink et al., 2013) for the activation energy of snow Qs and the critical exponent + * β are set to 16,080 J mol−1 and 0.3, respectively." * @version 11.06 * @param Te Element temperature (K) * @return Temperature term of snow viscosity */ double SnLaws::snowViscosityTemperatureTerm(const double& Te) { - static const double Q = 67000.; // Activation energy for defects in ice (J mol-1) + const double Q = (current_variant == "POLAR") ? (16080.) : (67000.); // Activation energy for defects in ice (J mol-1) switch (SnLaws::t_term) { - case t_term_arrhenius_critical: { - static const double Q_fac = 0.39; // Adjust Q to snow; from Schweizer et al. (2004): 0.24 - static const double criticalExp = 0.7; //0.5; //0.3; // - static const double T_r = 265.15; // Reference temperature (K), from Schweizer et al. (2004) + case t_term_arrhenius_critical: + { + const double Q_fac = 0.39; // Adjust Q to snow; from Schweizer et al. (2004): 0.24 + const double criticalExp = (current_variant == "POLAR") ? (0.3) : (0.7); //0.5; //0.3; // + const double T_r = 265.15; // Reference temperature (K), from Schweizer et al. (2004) return ((1. / SnLaws::ArrheniusLaw(Q_fac * Q, Te, T_r)) - * (0.3 * pow((Constants::melting_tk - Te), criticalExp) + 0.4)); + * (0.3 * pow((Constants::meltfreeze_tk - Te), criticalExp) + 0.4)); } case t_term_arrhenius: return (1. / SnLaws::ArrheniusLaw(Q, Te, 263.)); case t_term_stk: // Master thesis, September 2009 return (0.35 * sqrt(274.15 - Te)); case t_term_837: // as of revision 243, used up to revision 837 (deprecated) - return (9. - 8.7 * exp(0.015 * (Te - Constants::melting_tk))); + return (9. 
- 8.7 * exp(0.015 * (Te - Constants::meltfreeze_tk))); + default: + throw UnknownValueException("Unknown viscosity temperature dependency selected!", AT); } - - throw UnknownValueException("Unknown viscosity temperature dependency selected!", AT); } /** @@ -1147,39 +1267,41 @@ double SnLaws::loadingRateStressCALIBRATION(ElementData& Edata, const mio::Date& Edata.Eps_Dot = 0.; switch (visc) { - case visc_dflt: case visc_cal: case visc_ant: { // new calibration - const double age = std::max(0., date.getJulian() - Edata.depositionDate.getJulian()); - double sigReac = 15.5 * Edata.CDot * exp(-age/101.); - if (Edata.theta[WATER] > SnowStation::thresh_moist_snow) - sigReac *= 0.37 * (1. + Edata.theta[WATER]); // 0.2 ; 0.37 - Edata.Eps_Dot = sigReac; - return sigReac; - } - case visc_897: { // r897 - double sigMetamo = 0.; - const double age = std::max(0., date.getJulian() - Edata.depositionDate.getJulian()); - const double sigReac = 15.9 * Edata.CDot * exp(-age/101.); //tst2: 553. //tst1: 735. // - Edata.Eps_Dot = sigReac; - if (Edata.dd > Constants::eps /*((Edata->dd < 0.9) && (Edata->dd > 0.3))*/) { - sigMetamo = 37.0e3 * Metamorphism::ddRate(Edata); // 2010-10-23 + case visc_dflt: case visc_cal: case visc_ant: { // new calibration + const double age = std::max(0., date.getJulian() - Edata.depositionDate.getJulian()); + double sigReac = 15.5 * Edata.CDot * exp(-age/101.); + if (Edata.theta[WATER] > SnowStation::thresh_moist_snow) + sigReac *= 0.37 * (1. 
+ Edata.theta[WATER]); // 0.2 ; 0.37 + Edata.Eps_Dot = sigReac; + return sigReac; } - return (sigReac + sigMetamo); - } - case visc_837: case visc_stk: { // as of revision 837 - double sig0 = 0.; - if ((Edata.dd < 0.9) && (Edata.dd > 0.3)) { - double facIS = 3.; // default r712 - if (SnLaws::visc == SnLaws::visc_stk) - facIS = -1.5; //-1.1; //-0.5; // - sig0 = facIS * Metamorphism::ddRate(Edata) * sigTension / MM_TO_M(Edata.rg); + case visc_897: { // r897 + double sigMetamo = 0.; + const double age = std::max(0., date.getJulian() - Edata.depositionDate.getJulian()); + const double sigReac = 15.9 * Edata.CDot * exp(-age/101.); //tst2: 553. //tst1: 735. // + Edata.Eps_Dot = sigReac; + if (Edata.dd > Constants::eps /*((Edata->dd < 0.9) && (Edata->dd > 0.3))*/) { + sigMetamo = 37.0e3 * Metamorphism::ddRate(Edata); // 2010-10-23 + } + return (sigReac + sigMetamo); } - return sig0; - } + case visc_837: case visc_stk: { // as of revision 837 + double sig0 = 0.; + if ((Edata.dd < 0.9) && (Edata.dd > 0.3)) { + double facIS = 3.; // default r712 + if (SnLaws::visc == SnLaws::visc_stk) + facIS = -1.5; //-1.1; //-0.5; // + sig0 = facIS * Metamorphism::ddRate(Edata) * sigTension / MM_TO_M(Edata.rg); + } + return sig0; + } + default: + //this should not be reached... + prn_msg(__FILE__, __LINE__, "err", Date(), "visc=%d not a valid choice for loadingRateStress!", visc); + throw IOException("Choice not implemented yet!", AT); } - //this should not be reached... 
- prn_msg(__FILE__, __LINE__, "err", Date(), "visc=%d not a valid choice for loadingRateStress!", visc); - throw IOException("Choice not implemented yet!", AT); + } /** @@ -1353,7 +1475,7 @@ double SnLaws::snowViscosityDEFAULT(ElementData& Edata) static const double sig1 = 0.5e6; // Unit stress from Sinha's formulation (Pa) const double visc_factor = 1./eps1Dot * Optim::pow3(sig1/visc_fudge); const double visc_macro = Edata.neck2VolumetricStrain(); // Macro-structure (layer) related factor - const double Te = std::min(Edata.Te, Edata.melting_tk); + const double Te = std::min(Edata.Te, Edata.meltfreeze_tk); double eta = (1. / visc_macro) * SnLaws::snowViscosityTemperatureTerm(Te) * visc_factor; static const double sigNeckYield = 0.4e6; // Yield stress for ice in neck (Pa) @@ -1413,7 +1535,7 @@ double SnLaws::snowViscosityCALIBRATION(ElementData& Edata, const mio::Date& dat static const double sig1 = 0.5e6; // Unit stress from Sinha's formulation (Pa) const double visc_factor = 1./eps1Dot * Optim::pow3(sig1/visc_fudge); const double visc_macro = Edata.neck2VolumetricStrain(); // Macro-structure (layer) related factor - const double Te = std::min(Edata.Te, Edata.melting_tk); + const double Te = std::min(Edata.Te, Edata.meltfreeze_tk); double eta = (1. / visc_macro) * SnLaws::snowViscosityTemperatureTerm(Te) * visc_factor; static const double sigNeckYield = 0.4e6; // Yield stress for ice in neck (Pa) @@ -1458,15 +1580,14 @@ double SnLaws::ArrheniusLaw(const double ActEnergy, const double T, const double */ double SnLaws::AirEmissivity(mio::MeteoData& md, const std::string& variant) { - const double ILWR = (md(MeteoData::ILWR)>1.)? md(MeteoData::ILWR) : IOUtils::nodata; + const double ILWR = md(MeteoData::ILWR); if (ILWR!=IOUtils::nodata) return AirEmissivity(ILWR, md(MeteoData::TA), variant); else { - const double cloudiness = (md(MeteoData::ILWR)>0. && md(MeteoData::ILWR)<=1.)? 
md(MeteoData::ILWR) : IOUtils::nodata; const double ilwr_p = Atmosphere::ILWR_parametrized(md.meta.position.getLat(), md.meta.position.getLon(), md.meta.position.getAltitude(), md.date.getJulian(), md.date.getTimeZone(), - md(MeteoData::RH), md(MeteoData::TA), md(MeteoData::ISWR), cloudiness); + md(MeteoData::RH), md(MeteoData::TA), md(MeteoData::ISWR), md(MeteoData::TAU_CLD)); return AirEmissivity(ilwr_p, md(MeteoData::TA), variant); } @@ -1548,14 +1669,14 @@ double SnLaws::SnowViscosityMSU(const ElementData& Edata) const double rc = Edata.concaveNeckRadius(); // concave radius of neck const double L = 2.*rg*rc/(rg + rc); // neck length - //define some physics + //define some constants static const double epdot = 1.76e-7; // unit strain rate (at stress = 1 MPa) (1/sec) static const double Q = 67000.; // J/mol static const double R = 8.31; // gas constant J/mol/K static const double Sig1 = 0.5e6; // unit stress Pa from Sinha's formulation static const double Tref = 263.0; // reference temperature in K static const double SneckYield = 0.4e6;// Yield stress for ice in neck (Pa) - static const double th_i_f = 0.35, f_2 = 0.02; // Empirical physics to control dry snow viscosity fudge + static const double th_i_f = 0.35, f_2 = 0.02; // Empirical constants to control dry snow viscosity fudge // First check to see if neck stress (Sneck) is >= SneckYield = 0.4 MPa. const double Sneck = (4.0/(N3*theta_i)) * Optim::pow2(rg/rb) * (-S); // Work with absolute value of stress @@ -1575,7 +1696,7 @@ double SnLaws::SnowViscosityMSU(const ElementData& Edata) Vis = (L/(2.*rg + L)) * epdot * exp( (Q/R)*(1./Tref - 1./T) ); Vis = 1. / (Vis * Optim::pow2(S) * Optim::pow3(Vis1)); } else { // NOT YIELDING, linear - // This viscocity is not a function of stress and is therefore a linear viscosity. Its value + // This viscosity is not a function of stress and is therefore a linear viscosity. Its value // depends on rb, rg, N3, theta_i and T. 
The expression ((N3*theta_i)/(4.))*(rb/rg)^2 // determines the neck stress relative to the snow stress. The expression ((rg + L)/(3.*L)) // relates the neck strains to the global volumetric strains. The term MONTANA_V_FUDGE is a diff --git a/third_party/snowpack/Laws_sn.h b/third_party/snowpack/Laws_sn.h index 1e09c87a..0655b6e3 100644 --- a/third_party/snowpack/Laws_sn.h +++ b/third_party/snowpack/Laws_sn.h @@ -29,6 +29,8 @@ #include #include "DataClasses.h" +#include "snowpackCore/Snowpack.h" //some constants are necessary + class SnLaws { @@ -68,19 +70,26 @@ class SnLaws { static double compSensibleHeatCoefficient(const CurrentMeteo& Mdata, const SnowStation& Xdata, const double& height_of_meteo_values); - static double compLatentHeat_Rh(const CurrentMeteo& Mdata, SnowStation& Xdata, + static double compLatentHeat_Rh(const std::string soil_evaporation, const CurrentMeteo& Mdata, SnowStation& Xdata, const double& height_of_meteo_values); - static double compLatentHeat(const CurrentMeteo& Mdata, SnowStation& Xdata, + static double compLatentHeat(const std::string soil_evaporation, const CurrentMeteo& Mdata, SnowStation& Xdata, const double& height_of_meteo_values); - static double compSoilThermalConductivity(const ElementData& Edata, const double& dvdz); + static double compSoilThermalConductivity(const ElementData& Edata, const double& dvdz, + const std::string& soil_thermal_conductivity); + + static double soilVaporDiffusivity(const ElementData& Edata); + static double compEnhanceWaterVaporTransportSoil(const ElementData& Edata,const double& clay_fraction); + static double compSoilThermalVaporConductivity(const ElementData& Edata_bot, const ElementData& Edata_top, const double& Te_bot, const double& Te_top,const double& clay_fraction); + static double compSoilIsothermalVaporConductivity(const ElementData& Edata_bot, const ElementData& Edata_top, const double& Te_bot, const double& Te_top, const double& T_node); + static double compSnowThermalConductivity(const 
ElementData& Edata, const double& dvdz, const bool& show_warnings=true); static double compEnhanceWaterVaporTransportSnow(const SnowStation& Xdata, const size_t& i_e); static double compLWRadCoefficient(const double& t_snow, const double& t_atm, const double& e_atm); - static double parameterizedSnowAlbedo(const std::string& i_albedo, const std::string& i_albedo_parameterization, const std::string& i_albAverageSchmucki, + static double parameterizedSnowAlbedo(const std::string& i_albedo, const std::string& i_albedo_parameterization, const std::string& i_albAverageSchmucki, const double& i_albNIED_av, const double& i_hn_albedo_fixedValue, const ElementData& Edata, const double& Tss, const CurrentMeteo& Mdata, const bool& ageAlbedo=true); static void compShortWaveAbsorption(const std::string& i_sw_absorption_scheme, SnowStation& Xdata, const double& I0); static void compAdvectiveHeat(SnowStation& Xdata, const double& advective_heat, @@ -100,7 +109,7 @@ class SnLaws { static double loadingRateStressCALIBRATION(ElementData& Edata, const mio::Date& date); static double snowViscosityFudgeDEFAULT(const ElementData& Edata); static double snowViscosityFudgeCALIBRATION(const ElementData& Edata, const mio::Date& date); - static double compSnowViscosity(const std::string& variant, const std::string& i_viscosity_model, const std::string& i_watertransport_model, + static double compSnowViscosity(const std::string& variant, const std::string& i_viscosity_model, const std::string& i_watertransport_model, ElementData& Edata, const mio::Date& date); static double snowViscosityDEFAULT(ElementData& Edata); static double snowViscosityKOJIMA(const ElementData& Edata); @@ -114,13 +123,6 @@ class SnLaws { static const double smallest_viscosity, field_capacity_soil; static const bool jordy_new_snow, wind_pump, wind_pump_soil; - private: - typedef enum SOIL_EVAP_MODEL { - EVAP_RESISTANCE, - EVAP_RELATIVE_HUMIDITY, - EVAP_NONE - } soil_evap_model; - static bool setStaticData(const 
std::string& variant, const std::string& watertransportmodel); static double newSnowDensityPara(const std::string& i_hn_model, @@ -140,7 +142,7 @@ class SnLaws { static bool setfix; static size_t swa_nBands; static std::vector swa_k, swa_pc, swa_fb; - static const soil_evap_model soil_evaporation; + //static const soil_evap_model soil_evaporation; static const double rsoilmin, relsatmin, alpha_por_tor_soil, pore_length_soil; static const double montana_c_fudge, montana_vapor_fudge, montana_v_water_fudge; static const double wind_ext_coef, displacement_coef, alpha_por_tor; diff --git a/third_party/snowpack/MainPage.h b/third_party/snowpack/MainPage.h index e09be47a..ae03e63b 100644 --- a/third_party/snowpack/MainPage.h +++ b/third_party/snowpack/MainPage.h @@ -31,18 +31,24 @@ /** * @mainpage Table of content * -# External Links - * -# Snowpack's home page - * -# Installation, compilation - * -# Getting help + * -# Snowpack's home page + * -# Installation, compilation + * -# Getting help * -# End User documentation * -# \subpage getting_started "Getting Started" - * -# Model principles + * -# Model principles & configuration * -# \subpage general "General concepts" + * -# Submodels documentation + * -# \subpage water_transport "Water Transport" + * -# \subpage water_vapor_transport "Water Vapor Transport" + * -# \subpage sea_ice "Sea Ice" + * -# \subpage technical_snow "Technical snow" * -# \subpage references "References" * -# \subpage uses "Use cases" * -# Inputs / Outputs * -# \subpage requirements "Data requirements" * -# \subpage snowpackio "Data file formats" + * -# \subpage advanced_setups "Advanced simulation setup" * -# Simulation tools * -# \subpage configuration "Configuring a simulation" * -# \subpage snowpack_visualization "Visualization of the results" @@ -73,13 +79,13 @@ */ /** - * @page getting_started Getting Started + * @page getting_started Simple simulations * After you installed a binary package or compiled and installed %Snowpack, you can run 
your first simulation. * Please make sure you properly set the proper environement variables for your operating system: * - on osX: set \em PATH and \em DYLD_FALLBACK_LIBRARY_PATH * - on Linux: set \em PATH and \em LD_LIBRARY_PATH if you install the package to a non-standard location * - on Windows: set \em PATH - * How to do this (and much more) is explained in the online documentation at https://models.slf.ch/p/snowpack/page/Getting-started/. + * How to do this (and much more) is explained in the online documentation at https://snowpack.slf.ch/Getting-started. * * @section Running_an_example Running an example simulation * In order to run an example simulation, please follow the steps below: @@ -89,23 +95,131 @@ * the last command line it contains into a terminal). You can also manually run %Snowpack, by typing something like * snowpack -c {ini file with path} -e {simulation end date in ISO format}. * -# Once the simulation is finished, the results are available in the \b output directory. This directory \b must exist before you run the simulation! - * -# The results can be visualized using the \ref sngui_config "sngui tool" by opening the .pro file that was generated in \b output. + * -# The results can be visualized using the niViz tool and opening the .pro file that was generated in \b output. * * @section Running_own_simulation Running your own simulation * Once you have been able to run an example simulation, you can try to run your own simulation. This involves the following steps: * -# First, gather the meteorological data that you need to drive the simulation. 
Please have a look at \subpage requirements "Data requirements"; - * -# Then, write the data in a format that meteoio can read for %Snowpack, for example SMET (see the file + * -# Then, write the data in a format that meteoio can read for %Snowpack, for example SMET (see the file * format specification included in the meteoio's documentation and follow it); - * -# Once your data is ready, you can \subpage configuration "configure your simulation", using inishell. Please keep in + * -# Once your data is ready, you can \subpage configuration "configure your simulation", using inishell. Please keep in * mind that the default choices in inishell are such that if you don't change them, a simple simulation should work. And do \b not change parameters in * the SnowpackAdvanced section! (this section is reserved for some specific use cases where a deeper control on the operation of the model is required). * -# Then, run the simulation from a terminal (after going to the directory where your simulation is) with a command line such as * snowpack -c {ini file with path} -e {simulation end date in ISO format}. * -# Once the simulation is finished, the results are available in the \b output directory. This directory \b must exist before you run the simulation! - * -# The results can be visualized using the \ref sngui_config "sngui tool" by opening the .pro file that was generated in \b output. + * -# The results can be visualized using the niViz tool and opening the .pro file that was generated in \b output. + * + * @section model_workflow Simulation workflow + * When running a simulation, it is important to keep in mind that the model is organized as several modules that interract together. It is possible to configure + * some parameters for the various modules and to enable/disable modules. Some modules can be used outside of Snowpack (like + * MeteoIO that is used in various applications or libSnowpack that is used by Alpine3D). 
+ * More complex simulation workflows (such as spatial resampling or one-way coupling with other numerical models) are presented in the + * \subpage advanced_setups "Advanced simulation setup" page. + * + * \image html simulation_workflow.svg "Simulation workflow" width=900px + * \image latex simulation_workflow.eps "Simulation workflow" width=0.9\textwidth + * + * We provide various tools to help you manage this simulation workflow: + * - MeteoIO for preparing the forcings (basically you can take the raw data out of the data + * logger and do everything you need with MeteoIO all the way to providing the data to Snowpack; + * - Inishell to prepare your configuration files; + * - niViz to visualize the simulation outputs or prepare customized input profiles; + * - snowpat as Python modules for handling smet meterological forcings + * and pro snowpack outputs; + * - and even a new file format (an evolution of SMET to make it more generic and more formaly standardized), the + * interoperable CSV (iCSV) format! * */ + /** + * @page advanced_setups Advanced simulation setup + * + * @section virtual_stations Spatial resampling + * Through MeteoIO, it is possible to force Snowpack with data that has not been measured locally. The forcings are then extracted for example from gridded data (such as the outputs of + * weather forecasting models or reanalysis models) or by spatially interpolating stations' data to the point of interest. For the data extraction or interpolation, please + * refer to MeteoIO's documentation section "Spatial resampling" (see for example the current stable release + * documentation). 
+ * + * \image html virtual_stations.svg "Spatial resampling" width=900px + * \image latex virtual_stations.eps "Spatial resampling" width=0.9\textwidth + * + * The SLOPE_FROM_SNO configuration key in the [Input] section controls whether slope angle and azimuth are taken from the sno file or from + * the meteorological forcings (in this case, automatically extracted from either the gridded data or from the DEM used for the spatial interpolations). + * Please note that this won't change the meteorological forcings (they are always only valid for their associated coordinates on flat field) but will allow + * Snowpack to reproject the precipitation and radiation fields on the slope (angle and azimuth) of your choice. + * + * When relying on spatially interpolated values, it is often necessary to first run Snowpack at the real forcing locations in order to generate easier to interpolate fields + * (such as ISWR, ILWR, PSUM) and then run the virtual stations by spatially interpolating the computed variables. 
In this case, it is recommended to run the first set + * of simulations with the following set of keys: + * @code + * [Output] + * TS_WRITE = TRUE + * TS_FORMAT = SMET + * TS_DAYS_BETWEEN = 0.04166667 ;so we get hourly values + * + * OUT_CANOPY = FALSE + * OUT_HAZ = FALSE + * OUT_SOILEB = FALSE + * OUT_HEAT = FALSE + * OUT_T = FALSE + * OUT_STAB = FALSE + * OUT_LW = TRUE + * OUT_SW = TRUE + * OUT_MASS = TRUE + * OUT_METEO = TRUE + * + * AVGSUM_TIME_SERIES = TRUE + * CUMSUM_MASS = FALSE + * PRECIP_RATES = FALSE + * @endcode + * + * And the second set of simulations (ie the ones relying on spatially interpolated forcings) with this set of keys: + * @code + * [Input] + * METEO = SMET + * + * [InputEditing] + * *::EDIT1 = MOVE + * *::ARG1::DEST = PSUM_S + * *::ARG1::SRC = MS_Snow + * + * *::EDIT2 = MOVE + * *::ARG2::DEST = PSUM_L + * *::ARG2::SRC = MS_Rain + * + * *::EDIT3 = MOVE + * *::ARG3::DEST = HS + * *::ARG3::SRC = HS_meas + * + * *::EDIT4 = MOVE + * *::ARG4::DEST = TSG + * *::ARG4::SRC = T_bottom + * + * *::EDIT5 = MOVE + * *::ARG5::DEST = TSS + * *::ARG5::SRC = TSS_meas + * + * *::EDIT6 = KEEP + * *::ARG6::PARAMS = TA TSS TSG RH ISWR ILWR HS VW DW PSUM_S PSUM_L PSUM PSUM_PH + * + * *::EDIT7 = CREATE + * *::ARG7::PARAM = PSUM_PH + * *::ARG7::ALGORITHM = PRECSPLITTING + * *::ARG7::TYPE = THRESH + * *::ARG7::SNOW = 274.35 + * + * *::EDIT8 = CREATE + * *::ARG8::PARAM = PSUM + * *::ARG8::ALGORITHM = PRECSPLITTING + * *::ARG8::TYPE = THRESH + * *::ARG8::SNOW = 274.35 + * + * [SNOWPACK] + * ENFORCE_MEASURED_SNOW_HEIGHTS = FALSE + * @endcode + */ + /** * @page general General concepts * The one-dimensional snow cover model SNOWPACK (Lehning et al., 1999; Bartelt and Lehning, 2002; Lehning et al., 2002a, b), @@ -115,7 +229,7 @@ * calculations for arctic areas (Meirold-Mautner and Lehning, 2003) and calculations of chemical solute transport in snow (Waldner et al., 2003). 
* * @section physical_processes Physical processes - * \image html physical_processes.png "Principal physical processes included in the SNOWPACK model" + * \image html physical_processes.svg "Principal physical processes included in the SNOWPACK model" width=900px * \image latex physical_processes.eps "Principal physical processes included in the SNOWPACK model" width=0.9\textwidth * * A graphical review of the physical processes described by the SNOWPACK model is given in the above figure. SNOWPACK is based on a Lagrangian @@ -126,7 +240,7 @@ * * @section model_structure Structure of the physical modeling * @subsection model_foundations Model Foundations - * \image html snowpack_column.png "The SNOWPACK soil/snow/canopy column" + * \image html snowpack_column.svg "The SNOWPACK soil/snow/canopy column" width=300px * \image latex snowpack_column.eps "The SNOWPACK soil/snow/canopy column" width=0.5\textwidth * The SNOWPACK model is built around a 1D soil/snow/canopy column (see figure above). This in effect neglects lateral transfers and only considers vertical * gradients and transfers. The snow is modeled as a three phase porous medium (ice/liquid water/water vapor) but can also contain an arbitrary amount of soil @@ -153,7 +267,7 @@ * an albedo and short wave absorption parametrization and a snowdrift model. * - some post-processing models will be added to provide more relevant outputs: a hardness model, several snow stability index, a snow classification. * - * \image html snowpack_physics.png "Structure of the SNOWPACK model" + * \image html snowpack_physics.svg "Structure of the SNOWPACK model" width=1000px * \image latex snowpack_physics.eps "Structure of the SNOWPACK model" width=0.9\textwidth * * The user can configure variants of these basic model concepts. 
The way of interaction is primarily through a configuration file but also changes to the source code by @@ -161,9 +275,9 @@ * * @subsection model_ebalance Energy Balance * The figure below shows the various fluxes that are part of the energy balance of the SNOWPACK model. These are available in the output files as well as - * through the sngui interface. + * through the niViz interface. * - * \image html energy_balance.png "Energy Balance components of the SNOWPACK model" + * \image html energy_balance.svg "Energy Balance components of the SNOWPACK model" width=900px * \image latex energy_balance.eps "Energy Balance components of the SNOWPACK model" width=0.9\textwidth * */ @@ -279,24 +393,36 @@ * - air temperature (TA) * - relative humidity (RH) * - wind speed (VW) - * - incoming short wave radiation (ISWR) and/or reflected short wave radiation (RSWR) - * - incoming long wave radiation (ILWR) and/or surface temperature (TSS) + * - incoming short wave radiation (ISWR) and/or reflected short wave radiation (RSWR) or net short wave radiation (it must be called NET_SW in Smet files). + * - incoming long wave radiation (ILWR) and/or surface temperature (TSS) [*] * - precipitation (PSUM) and/or snow height (HS) - * - ground temperature (TSG, if available) + * - ground temperature (TSG, if available. Otherwise, you will have to use MeteoIO's + * data generators to generate a value) or geothermal heat flux * - snow temperatures at various depths (TS1, TS2, etc if available and only for comparisons, see section \ref SnowSoilTemperatures) * * These parameters should best be available at a hourly time step and preferably in MKSA units - * (please check the MeteoIO plugins documentation for specific cases, like GRIB, NetCDF... that are automatically handled). + * (please check the MeteoIO plugins documentation for specific cases, like GRIB, NetCDF... that are automatically handled). 
Please have a look + * at the \ref snowpackio "other input parameters" that are required to run your simulation! + * + * [*] Please note that it is possible to parametrize the incoming long wave radiation (ILWR) from the short wave radiation, obviously + * with reduced performance compared to measured ILWR. This is achieved by configuring a + * data generator in MeteoIO such as an + * all sky parametrization. if ISWR is available, + * this is straightforward: the clearness index iswr_meas / iswr_pot gives the cloudiness which is used by a ilwr parametrization. + * If only RSWR is available, at each timestep Snowpack computes the matching iswr based on its modelled albedo iswr = rswr / albedo_mod and + * then calls all data generator that you may have defined for ILWR (which now have access to ISWR). It is also possible to use such a + * data generator directly on rswr (thus based on a fixed soil or snow albedo to internally compute iswr) but this is less performant... * * @section data_preparation Data preparation - * In order to help %Snowpack handle the (sometimes broken) data sets to be used in a simulation, the MeteoIO library is used. + * In order to help %Snowpack handle the (sometimes broken) data sets to be used in a simulation, the MeteoIO library is used. * This enables %Snowpack to get data from a variety of sources (several input file formats, connection to a database, connection to a web service) and to * pre-process real-world data, by filtering the data on the fly and by resampling the data on the fly. Please read the MeteoIO documentation (available - * online for the last official release) to learn about - * the supported file formats, the available filters and resampling/re-accumulation strategies. 
+ * online for the last official release) to learn about + * the supported file formats, the available filters and resampling/re-accumulation strategies as well as the available parametrizations that can help generate + * some otherwise missing data (either from other parameters or fully synthetic, as last resort). * * It is recommended to prepare the data in the - * SMET file format for its ease of use. + * SMET file format for its ease of use. * * @section data_recomendations Data recommendations * In case incoming and reflected short wave radiation as well as incoming long wave radiation are all @@ -320,7 +446,7 @@ * done with great care (the model performing various checks on the physical consistency of the input data, it \b will exclude data points that are not consistent * with the other parameters. For example, precipitation occuring simultaneously with quite dry air will be refused). * - * \image html clear_sky.png "Data consistency check" + * \image html clear_sky.svg "Data consistency check" width=20% * \image latex clear_sky.eps "Data consistency check" width=0.9\textwidth * For example, the figure above allows to check the following points: * - the (solid) precipitation are synchronized with the major snow height increase - this is consistent; @@ -338,7 +464,7 @@ * label the columns as TS1, TS2, TS3, etc. If you use the snio format, refer to the documentation. * User defined positions (m) should be provided in the SnowpackAdvanced section of the \em "io.ini" file, * for example, FIXED_POSITIONS = "0.25 0.50 -0.10": - * - positive values refer to heigths measured from the ground surface (snow only) + * - positive values refer to heights measured from the ground surface (snow only) * - negative values refer to depths measured from either the ground surface or the snow surface in case no soil * layers are present * - A sensor must at least be covered by MIN_DEPTH_SUBSURF (m) snow for its temperature to be output. 
@@ -359,8 +485,10 @@ * the section "Available data generators and usage" for the full list of available generators): * @code * [Generators] - * PSUM_PH::generators = PPHASE - * PSUM_PH::PPHASE = RANGE 273.35 275.35 + * PSUM_PH::GENERATOR1 = PRECSPLITTING + * PSUM_PH::ARG1::TYPE = RANGE + * PSUM_PH::ARG1::SNOW = 273.35 + * PSUM_PH::ARG1::RAIN = 275.35 * @endcode * */ @@ -370,10 +498,11 @@ * The configuration for a given simulation is kept in a ".ini" file (see http://en.wikipedia.org/wiki/INI_file). This is an ascii file that contains * keys/values structured by sections. This can be easily edited with a simple text editor. More information about the structure of the file and how to generally deal * with it can be found in MeteoIO's documentation (section "How to build your io.ini configuration file"). However, it is recommended to use the inishell tool for - * generating the configuration file for %Snowpack in order to prevent missing important keys, etc + * generating the configuration file for %Snowpack in order to prevent missing important keys, etc Please read MeteoIO's documentation (specially the "general + * Concepts" introduction)! * * @section inishell_config The inishell tool - * It is highly recommended to use the Inishell tool to generate these ini files + * It is highly recommended to use the Inishell tool to generate these ini files * in order to reduce editing errors. This tool also allows you to edit an existing file in order to change the configuration. * \image html inishell.png "inishell overview" * \image latex inishell.eps "inishell overview" width=0.9\textwidth @@ -390,26 +519,38 @@ * The %Snowpack_advanced section contains settings that previously required to edit the source code and recompile the model. Since these settings * deeply transform the operation of the model, please refrain from using them if you are not absolutely sure of what you are doing. 
* + * @section soil_hydraulic_properties Setting soil hydraulic properties with Richards Equation + * When selecting `WATERTRANSPORTMODEL_SOIL = RICHARDSEQUATION`, the grain size (`rg`) of the soil layers in the `*.sno` file is used to determine the water retention properties of the soil, according to the following values: + *
+ * + * + * + * + * + * + * + * + * + *
Soil type definitions
Soil type | rg | Soil type | rg
ORGANIC | 0.2 | SANDYCLAYLOAM | 6.5
CLAY | 0.5 | SANDYLOAM | 7.5
CLAYLOAM | 1.5 | SILT | 8.5
LOAM | 2.5 | SILTYCLAY | 9.5
LOAMYSAND | 3.5 | SILTYCLAYLOAM | 10.5
SAND | 4.5 | SILTLOAM | 11.5
SANDYCLAY | 5.5 | WFJGRAVELSAND | 12.5
+ * + * Notes: + * - Here, the soil types refer to the ROSETTA Class Average Hydraulic Parameters. + * - When using Richards equation, theta[SOIL] is set according to the soil type and the values specified in the *.sno file will be ignored. + * - WFJGRAVELSAND is a special type created for initial simulations for Weissfluhjoch. In later simulations, LOAMYSAND has been used for Weissfluhjoch. */ /** * @page snowpack_visualization Visualization tools * The simulation outputs are usually saved in \a ".pro" files for the time resolved profiles and \a ".met" files for the meteorological data time series * (see section \subpage snowpackio "Snowpack file formats"). These files can be processed with some scripts, relying on GNU plot or R for generating graphs - * but are usually viewed with a graphical application. Two such applications are currently available: the legacy SnGUI Java tool and the newly developed - * SnopViz javascript tool. + * but are usually viewed with a graphical application such as the open source, online niViz application. * - * @section sngui_config The sngui tool - * This java application can be downloaded after registering (and requesting access) on the web site. - * \image html sngui_overview_small.png "sngui overview" - * \image latex sngui_overview.eps "sngui overview" width=0.9\textwidth - * - * @section snopviz The SnopViz tool + * @section niviz The niViz tool * This javascript application work in any sufficiently recent web browser ( firefox >= 33.0, Safari >= 5.1, Internet Explorer >= 11.0, - * Chrome >= 38). You can either use it online and then open your profile to visualize or you can - * download a pre-packaged version that can be installed for offline use on your computer. - * \image html snopviz_small.png "SnopViz overview" - * \image latex snopviz.eps "SnopVizi overview" width=0.9\textwidth + * Chrome >= 38). 
You can either use it online and then open your profile to visualize or you can + * download a pre-packaged version that can be installed for offline use on your computer. + * \image html niviz.png "niViz overview" + * \image latex niviz.eps "niViz overview" width=0.9\textwidth * */ diff --git a/third_party/snowpack/Meteo.cc b/third_party/snowpack/Meteo.cc index bd212b8f..26b6b8ca 100644 --- a/third_party/snowpack/Meteo.cc +++ b/third_party/snowpack/Meteo.cc @@ -28,9 +28,9 @@ #include using namespace mio; +#include "Meteo.h" #include "Constants.h" #include "Laws_sn.h" -#include "Meteo.h" #include "Utils.h" /************************************************************ @@ -38,12 +38,14 @@ using namespace mio; ************************************************************/ Meteo::Meteo(const SnowpackConfig& cfg) - : canopy(cfg), roughness_length(0.), height_of_wind_value(0.), adjust_height_of_wind_value(true), stability(MO_MICHLMAYR), - research_mode(false), useCanopyModel(false) + : canopy(cfg), dataGenerator(nullptr), roughness_length(0.), height_of_wind_value(0.), + variant(), stability(MO_HOLTSLAG), research_mode(false), useCanopyModel(false) { const std::string stability_model = cfg.get("ATMOSPHERIC_STABILITY", "Snowpack"); stability = getStability(stability_model); + cfg.getValue("VARIANT", "SnowpackAdvanced", variant); + //Initial estimate of the roughness length for the site; will be adjusted iteratively, default value and operational mode: 0.002 m cfg.getValue("ROUGHNESS_LENGTH", "Snowpack", roughness_length); @@ -52,11 +54,33 @@ Meteo::Meteo(const SnowpackConfig& cfg) //Define the heights of the meteo measurements above ground (m). Required for surface energy exchange computation and for drifting and blowing snow. 
cfg.getValue("HEIGHT_OF_WIND_VALUE", "Snowpack", height_of_wind_value); - cfg.getValue("ADJUST_HEIGHT_OF_WIND_VALUE", "SnowpackAdvanced", adjust_height_of_wind_value); cfg.getValue("RESEARCH", "SnowpackAdvanced", research_mode); } +Meteo::Meteo(const Meteo& mt) + : canopy(mt.canopy), dataGenerator(nullptr), roughness_length(mt.roughness_length), height_of_wind_value(mt.height_of_wind_value), + variant(mt.variant), stability(mt.stability), research_mode(mt.research_mode), useCanopyModel(mt.useCanopyModel) {} + +Meteo& Meteo::operator=(const Meteo& mt) +{ + canopy = mt.canopy; + dataGenerator = nullptr; + roughness_length = mt.roughness_length; + height_of_wind_value = mt.height_of_wind_value; + variant = mt.variant; + stability = mt.stability; + research_mode = mt.research_mode; + useCanopyModel = mt.useCanopyModel; + + return *this; +} + +Meteo::~Meteo() +{ + if (dataGenerator!=nullptr) delete dataGenerator; +} + /** * @brief Parse the given string an return the matching atmospheric stability algorithm * @param[in] stability_model atmospheric stability model specification @@ -149,11 +173,11 @@ void Meteo::MOStability(const ATM_STABILITY& use_stability, const double& ta_v, psi_m = psi_s = 0.; return; } - + ustar = Constants::karman * vw / (z_ratio - psi_m); const double Tstar = Constants::karman * (t_surf_v - ta_v) / (z_ratio - psi_s); const double stab_ratio = -Constants::karman * zref * Tstar * Constants::g / (t_surf * Optim::pow2(ustar)); - + if (stab_ratio > 0.) { // stable switch(use_stability) { case MO_HOLTSLAG: { @@ -162,18 +186,19 @@ void Meteo::MOStability(const ATM_STABILITY& use_stability, const double& ta_v, * exp(-0.35 * stab_ratio) + 10.71); return; } - + case MO_STEARNS: { - // Stearns & Weidner, 1993 + // Stearns & Weidner, 1993, eq (9), note ln x^2 in the paper is ln(x^2) not ln^2(x) const double dummy1 = pow((1. + 5. * stab_ratio), 0.25); - psi_m = log(1. + dummy1) * log(1. + dummy1) + log(1. + Optim::pow2(dummy1)) - - 2. 
* atan(dummy1) - 1.3333; + psi_m = log(Optim::pow2(1. + dummy1)) + log(1. + Optim::pow2(dummy1)) + - 2. * atan(dummy1) - 4./3. * Optim::pow3(dummy1) + 0.8247; + // Stearns & Weidner, 1993, eq (10), note ln x^2 in the paper is ln(x^2) not ln^2(x) const double dummy2 = Optim::pow2(dummy1); - psi_s = log(1. + dummy2) * log(1. + dummy2) - - 2. * dummy2 - 0.66667 * Optim::pow3(dummy2) + 1.2804; + psi_s = log(Optim::pow2(1. + dummy2)) + - 2. * dummy2 - 2./3. * Optim::pow3(dummy2) + 1.2804; return; } - + case MO_MICHLMAYR: { //default, old MO // Stearns & Weidner, 1993 modified by Michlmayr, 2008 const double dummy1 = pow((1. + 5. * stab_ratio), 0.25); @@ -184,34 +209,34 @@ void Meteo::MOStability(const ATM_STABILITY& use_stability, const double& ta_v, - 1. * dummy2 - 0.3 * Optim::pow3(dummy2) + 1.2804; return; } - + case MO_LOG_LINEAR: { //log_linear psi_m = psi_s = -5.* stab_ratio; return; } - + case MO_SCHLOEGL_UNI: { //schloegl univariate: bin univariate 2/3 datasets psi_m = -1.62 * stab_ratio; psi_s = -2.96 * stab_ratio; return; } - + case MO_SCHLOEGL_MULTI: { //All multivariate 2/3 without offset psi_m = - 65.35 *(ta_v - t_surf_v)/(0.5 * (ta_v + t_surf_v)) + 0.0017 * zref * Constants::g/pow(vw,2); psi_s = - 813.21 *(ta_v - t_surf_v)/(0.5 *(ta_v + t_surf_v)) - 0.0014 * zref * Constants::g/pow(vw,2); return; } - + case MO_SCHLOEGL_MULTI_OFFSET: { //All multivariate 2/3 with offset psi_m = -0.69 - 15.47 * (ta_v - t_surf_v)/(0.5 * (ta_v + t_surf_v)) + 0.0059 * zref * Constants::g/pow(vw,2); psi_s = 6.73 -688.18 * (ta_v - t_surf_v)/(0.5 * (ta_v + t_surf_v)) - 0.0023 * zref * Constants::g/pow(vw,2); return; } - + default: throw InvalidArgumentException("Unsupported atmospheric stability parametrization", AT); } @@ -220,9 +245,9 @@ void Meteo::MOStability(const ATM_STABILITY& use_stability, const double& ta_v, const double dummy1 = pow((1. - 15. * stab_ratio), 0.25); psi_m = 2. * log(0.5 * (1. + dummy1)) + log(0.5 * (1. + Optim::pow2(dummy1))) - 2. 
* atan(dummy1) + 0.5 * Constants::pi; - // Stearns & Weidner, 1993, for scalars - const double dummy2 = pow((1. - 22.5 * stab_ratio), 0.33333); - psi_s = pow(log(1. + dummy2 + Optim::pow2(dummy2)), 1.5) - 1.732 * atan(0.577 * (1. + 2. * dummy2)) + 0.1659; + // Stearns & Weidner, 1993, eq (8) for scalars, note ln x^2 in the paper is ln(x^2) not ln^2(x) + const double dummy2 = pow((1. - 22.5 * stab_ratio), 1./3.); + psi_s = log(pow(1. + dummy2 + Optim::pow2(dummy2), 1.5)) - 1.732 * atan(0.577 * (1. + 2. * dummy2)) + 0.1659; } } @@ -238,6 +263,9 @@ void Meteo::MicroMet(const SnowStation& Xdata, CurrentMeteo &Mdata, const bool& { static const unsigned int max_iter = 100; + //Adapting the roughness length value depending on the presence or absence of snow + const double rough_len=((Xdata.cH - Xdata.Ground) > 0.03)?roughness_length:Xdata.BareSoil_z0; + // Ideal approximation of pressure and vapor pressure const double p0 = Atmosphere::stdAirPressure(Xdata.meta.position.getAltitude()); const double sat_vap = Atmosphere::vaporSaturationPressure(Mdata.ta); @@ -249,7 +277,12 @@ void Meteo::MicroMet(const SnowStation& Xdata, CurrentMeteo &Mdata, const bool& const double t_surf_v = t_surf * (1. + 0.377 * sat_vap / p0); // Adjust for snow height if fixed_height_of_wind=false - const double zref = (adjust_VW_height)? std::max(0.5, height_of_wind_value - (Xdata.cH - Xdata.Ground)) : height_of_wind_value ; + const double zref = (adjust_VW_height) + ? std::max( + 0.5, + height_of_wind_value - (Xdata.cH - Xdata.Ground + ( (Xdata.findMarkedReferenceLayer() == Constants::undefined) ? (0.) : (Xdata.findMarkedReferenceLayer() - Xdata.Ground) )) + ) + : height_of_wind_value ; // In case of ventilation ... Wind pumping displacement depth (m) const double d_pump = (SnLaws::wind_pump)? 
SnLaws::compWindPumpingDisplacement(Xdata) : 0.; @@ -257,13 +290,13 @@ void Meteo::MicroMet(const SnowStation& Xdata, CurrentMeteo &Mdata, const bool& // initial guess (neutral) static const double eps1 = 1.e-3; double psi_m = 0., psi_s = 0.; - const double z_ratio = log((zref - d_pump) / roughness_length); + const double z_ratio = log((zref - d_pump) / rough_len); double ustar_old, ustar = Constants::karman * vw / (z_ratio - psi_m); //at first, psi_m=0 unsigned int iter = 0; do { iter++; ustar_old = ustar; - + // Stability corrections: compute ustar, psi_s & potentially psi_m if (stability==RICHARDSON) { RichardsonStability(ta_v, t_surf_v, zref, vw, z_ratio, ustar, psi_s); //compute ustar & psi_s @@ -279,15 +312,15 @@ void Meteo::MicroMet(const SnowStation& Xdata, CurrentMeteo &Mdata, const bool& prn_msg(__FILE__, __LINE__, "wrn", Mdata.date, "Stability correction did not converge (azi=%.0lf, slope=%.0lf) --> assume neutral", Xdata.meta.getAzimuth(), Xdata.meta.getSlopeAngle()); - Mdata.z0 = roughness_length; + Mdata.z0 = rough_len; Mdata.ustar = Constants::karman * vw / z_ratio; Mdata.psi_s = 0.; return; } // Save the values in the global Mdata data structure to use it later - Mdata.ustar = ustar; - Mdata.z0 = roughness_length; + Mdata.ustar = Constants::karman * vw / (z_ratio - psi_m); + Mdata.z0 = rough_len; Mdata.psi_s = psi_s; } @@ -341,14 +374,20 @@ bool Meteo::compHSrate(CurrentMeteo& Mdata, const SnowStation& Xdata, const doub * @param Mdata meteorological forcing * @param Xdata snow profile data * @param runCanopyModel should the canopy module also be called? + * @param adjust_height_of_wind_value should the height of wind values be adjusted? 
*/ -void Meteo::compMeteo(CurrentMeteo &Mdata, SnowStation &Xdata, const bool& runCanopyModel) +void Meteo::compMeteo(CurrentMeteo &Mdata, SnowStation &Xdata, const bool runCanopyModel, + const bool adjust_height_of_wind_value) { + // adjust_height_of_wind_value should be passed externally in order to allow to change it for each + // pixel in Alpine3D + bool canopy_status = true; if (useCanopyModel && runCanopyModel) { // The canopy model should not necessarily be called at every call to compMeteo - canopy.runCanopyModel(Mdata, Xdata, roughness_length, height_of_wind_value, adjust_height_of_wind_value); + canopy_status = canopy.runCanopyModel(Mdata, Xdata, roughness_length, height_of_wind_value, + adjust_height_of_wind_value); } - if (!(useCanopyModel) || Xdata.Cdata.zdispl < 0.) { + if (!(useCanopyModel) || canopy_status==false) { MicroMet(Xdata, Mdata, adjust_height_of_wind_value); } } @@ -394,7 +433,7 @@ void Meteo::compRadiation(const SnowStation &station, mio::SunObject &sun, Snowp const double hs = (use_hs_meas)? 
station.mH - station.Ground : station.cH - station.Ground; const double iswr_factor = Mdata.rswr / (dir_h+diff+Constants::eps); //avoiding "0/0" - if (hs<0.1 && Mdata.rh<0.7 && iswr_factor<0.3) { + if ((hs>0 && hs<0.1) && Mdata.rh<0.7 && iswr_factor<0.3) { dir_h = H_direct; diff = H_diffuse; Mdata.iswr = dir_h+diff; @@ -405,6 +444,22 @@ void Meteo::compRadiation(const SnowStation &station, mio::SunObject &sun, Snowp cfg.addKey("SW_MODE", "Snowpack", "BOTH"); // as both Mdata.iswr and Mdata.rswr were reset } } + + //if needed and possible, recompute ilwr and ea now that we have a good iswr (computed from rswr with a good parametrized albedo) + if (Mdata.poor_ea) { + if (dataGenerator==nullptr) dataGenerator = new mio::DataGenerator(cfg, std::set({"ILWR"})); + mio::MeteoData md(Mdata.date, station.meta); + md("TA") = Mdata.ta; + md("TSS") = Mdata.tss; + md("RH") = Mdata.rh; + md("HS") = Mdata.hs; + md("ISWR") = Mdata.iswr; + md("RSWR") = Mdata.rswr; + std::vector vecMeteo( {md} ); + + dataGenerator->fillMissing( vecMeteo ); + Mdata.ea = SnLaws::AirEmissivity(vecMeteo.front(), variant); + } Mdata.diff = diff; Mdata.dir_h = dir_h; diff --git a/third_party/snowpack/Meteo.h b/third_party/snowpack/Meteo.h index c21acc38..85fece8f 100644 --- a/third_party/snowpack/Meteo.h +++ b/third_party/snowpack/Meteo.h @@ -29,9 +29,9 @@ #include -#include "DataClasses.h" #include "SnowpackConfig.h" #include "snowpackCore/Canopy.h" +#include "DataClasses.h" class Meteo { public: @@ -49,11 +49,15 @@ class Meteo { } ATM_STABILITY; Meteo(const SnowpackConfig& i_cfg); + Meteo(const Meteo& mt); + Meteo& operator=(const Meteo& mt); /// -#include "Constants.h" #include "Saltation.h" +#include "Constants.h" #include "Utils.h" #include @@ -63,11 +63,15 @@ const double Saltation::salt_height = 0.07; * non-static section * ************************************************************/ -Saltation::Saltation(const SnowpackConfig& cfg) : saltation_model() +static std::string get_model(const 
SnowpackConfig& cfg) { - cfg.getValue("SALTATION_MODEL", "SnowpackAdvanced", saltation_model); + std::string model; + cfg.getValue("SALTATION_MODEL", "SnowpackAdvanced", model); + return model; } +Saltation::Saltation(const SnowpackConfig& cfg) : saltation_model( get_model(cfg) ) {} + /** * @brief Returns the wind profile * @param z @@ -116,7 +120,7 @@ double Saltation::sa_vw2(const double& z, const double& tauA, const double& tauS double u = 0., z_act = z0; while (z_act < z) { - const double dz = 0.00002; + static const double dz = 0.00002; z_act += dz; const double ustarz = ustar * (1. - (1. - sqrt(r)) * exp(-z_act / hs)); const double dudz = ustarz / Saltation::karman / z_act; @@ -146,8 +150,8 @@ bool Saltation::sa_Traject(const double& u0, const double& angle_e_rad, const do const double& tauA, const double& tauS, const double& z0, double& ubar, double& u_i, double& angle_i_rad, double& t_i, double& z_max) { - const double DT = 0.0005; //time step in seconds - const double vis = 1.74e-5; //viscosity + static const double DT = 0.0005; //time step in seconds + static const double vis = 1.74e-5; //viscosity // Initialize velocities of particle and position double xdot = u0 * cos(angle_e_rad); @@ -286,8 +290,8 @@ double Saltation::sa_AeroEntrain(const double& z0, const double& tauS, const dou const double u0 = Saltation::ratio_ve_ustar * sqrt((tauS - tau_th) / Constants::density_air); // u0 = 3.7*sqrt(tauS-tau_th); - const double eps = 0.001; - const int maxit = 40; + static const double eps = 0.001; + static const int maxit = 40; int iter=0; double tauA_old, Nae; double tauA = .5 * (tauS + tau_th); @@ -391,14 +395,18 @@ int Saltation::sa_TestSaltation(const double& z0, const double& tauS, const doub * @return bool */ bool Saltation::compSaltation(const double& i_tauS, const double& tau_th, const double& slope_angle, const double& dg, - double& massflux, double& c_salt) + double& massflux, double& c_salt) const { if (saltation_model == "SORENSEN") { // Default 
model - // Sorensen + // Sorensen (1991), parameters from Lehning et al. (2008) + // Note that this equation is expressed in the wrong units. See comment in: https://doi.org/10.5194/gmd-2022-28-RC1. The coefficients + // should have been converted from g/cm/s to kg/m/s. According to Vionnet, 2012 (https://pastel.archives-ouvertes.fr/tel-00781279v3/document, + // Fig. 5.3) SORENSEN2004 implemented below may be considered a better option. const double tauS = i_tauS; const double ustar = sqrt(tauS / Constants::density_air); const double ustar_thresh = sqrt(tau_th / Constants::density_air); if (ustar > ustar_thresh) { + // Eq. 2 in Lehning et al. (2008) [http://doi.org/10.1016/j.coldregions.2007.05.012]: massflux = 0.0014 * Constants::density_air * ustar * (ustar - ustar_thresh) * (ustar + 7.6*ustar_thresh + 205.); c_salt = massflux / ustar*0.001; // Arbitrary Scaling to match Doorschot concentration } else { @@ -406,8 +414,22 @@ bool Saltation::compSaltation(const double& i_tauS, const double& tau_th, const c_salt = 0.; } } + else if (saltation_model == "SORENSEN2004") { + // Sorensen (2004), parameters from Vionnet et al. (2014) + const double tauS = i_tauS; + const double ustar = sqrt(tauS / Constants::density_air); + const double ustar_thresh = sqrt(tau_th / Constants::density_air); + if (ustar > ustar_thresh) { + // Eq. 11 in Vionnet et al. (2014) [https://doi.org/10.5194/tc-8-395-2014]: + massflux = Constants::density_air / Constants::g * Optim::pow3(ustar) * (1. - Optim::pow2(ustar_thresh / ustar)) * (2.6 + 2.5 * Optim::pow2(ustar_thresh / ustar) + 2.
* ustar_thresh / ustar); + c_salt = massflux / ustar*0.001; // Arbitrary Scaling to match Doorschot concentration + } else { + massflux = 0.; + c_salt = 0.; + } + } else if (saltation_model == "DOORSCHOT") { // Judith Doorschot's model - int k = 5; + int k = 5; // Initialize Shear Stress Distribution const double taumean = i_tauS; const double taumax = 15.* i_tauS; diff --git a/third_party/snowpack/Saltation.h b/third_party/snowpack/Saltation.h index 1112c7f8..74b3e688 100644 --- a/third_party/snowpack/Saltation.h +++ b/third_party/snowpack/Saltation.h @@ -34,31 +34,31 @@ class Saltation { Saltation(const SnowpackConfig& i_cfg); bool compSaltation(const double& tauS, const double& tau_th, const double& slope_angle, const double& dg, - double& massflux, double& c_salt); + double& massflux, double& c_salt) const; static const double karman; static const double z0_salt; private: - double sa_vw(const double& z, const double& tauA, const double& tauS, const double& z0, + static double sa_vw(const double& z, const double& tauA, const double& tauS, const double& z0, const double& u_start, const double& slope_angle); - double sa_vw2(const double& z, const double& tauA, const double& tauS, const double& z0, + static double sa_vw2(const double& z, const double& tauA, const double& tauS, const double& z0, const double& u_start, const double& slope_angle); - bool sa_Traject(const double& u0, const double& angle_e_rad, const double& slope_angle, const double& dg, + static bool sa_Traject(const double& u0, const double& angle_e_rad, const double& slope_angle, const double& dg, const double& tauA, const double& tauS, const double& z0, double& ubar, double& u_i, double& angle_i_rad, double& t_i, double& z_max); - double sa_MassFlux(const double& z0, const double& tauS, const double& tauA, const double& slope_angle, + static double sa_MassFlux(const double& z0, const double& tauS, const double& tauA, const double& slope_angle, const double& dg, const double& tau_th, double& z_max, 
double& ubar, double& cs); - double sa_AeroEntrain(const double& z0, const double& tauS, const double& slope_angle, const double& dg, + static double sa_AeroEntrain(const double& z0, const double& tauS, const double& slope_angle, const double& dg, const double& tau_th, double& flux, double& z_max, double& ubar, double& cs); - int sa_TestSaltation(const double& z0, const double& tauS, const double& tauA, const double& slope_angle, + static int sa_TestSaltation(const double& z0, const double& tauS, const double& tauA, const double& slope_angle, const double& dg, const double& tau_th, double& z_max, double& ubar); - std::string saltation_model; + const std::string saltation_model; static const double hs_frac, elas, angle_ej, ratio_ve_ustar, salt_height; static const int strong, weak; }; diff --git a/third_party/snowpack/SnowDrift.cc b/third_party/snowpack/SnowDrift.cc index 0356714a..77efa394 100644 --- a/third_party/snowpack/SnowDrift.cc +++ b/third_party/snowpack/SnowDrift.cc @@ -43,36 +43,38 @@ const bool SnowDrift::msg_erosion = false; * non-static section * ************************************************************/ -SnowDrift::SnowDrift(const SnowpackConfig& cfg) : saltation(cfg), - enforce_measured_snow_heights(false), snow_redistribution(false), snow_erosion(false), alpine3d(false), - sn_dt(0.), nSlopes(0) +static bool get_bool(const SnowpackConfig& cfg, const std::string& key, const std::string& section) { - cfg.getValue("ALPINE3D", "SnowpackAdvanced", alpine3d); - - // See Snowpack.cc for a description - cfg.getValue("ENFORCE_MEASURED_SNOW_HEIGHTS", "Snowpack", enforce_measured_snow_heights); + bool value; + cfg.getValue(key, section, value); + return value; +} - /* - * Number of stations incl. the main station: at least 1, either 3, 5, 7 or 9 for SNOW_REDISTRIBUTION - * - 1: real simulation at main station (flat field or slope. 
In the latter case virtual slopes are somewhat odd (see also PERP_TO_SLOPE) - * - 3: real simulation at main station (flat field) plus 2 virtual slopes - * - 5: real simulation at main station (flat field) plus 4 virtual slopes - * - 7: real simulation at main station (flat field) plus 6 virtual slopes - * - 9: real simulation at main station (flat field) plus 8 virtual slopes - */ - cfg.getValue("NUMBER_SLOPES", "SnowpackAdvanced", nSlopes); +static bool get_redistribution(const SnowpackConfig& cfg) +{ + bool redistribution = false; + const int nSlopes = cfg.get("NUMBER_SLOPES", "SnowpackAdvanced"); // Defines whether real snow erosion at main station or/and redistribution on virtual slopes (default in operational mode) // should happen under blowing snow conditions. - cfg.getValue("SNOW_EROSION", "SnowpackAdvanced", snow_erosion); + //cfg.getValue("SNOW_EROSION", "SnowpackAdvanced", snow_erosion); if (nSlopes>1) - cfg.getValue("SNOW_REDISTRIBUTION", "SnowpackAdvanced", snow_redistribution); + cfg.getValue("SNOW_REDISTRIBUTION", "SnowpackAdvanced", redistribution); + + return redistribution; +} +static double get_sn_dt(const SnowpackConfig& cfg) +{ //Calculation time step in seconds as derived from CALCULATION_STEP_LENGTH const double calculation_step_length = cfg.get("CALCULATION_STEP_LENGTH", "Snowpack"); - sn_dt = M_TO_S(calculation_step_length); + return M_TO_S(calculation_step_length); } +SnowDrift::SnowDrift(const SnowpackConfig& cfg) : saltation(cfg), + enforce_measured_snow_heights( get_bool(cfg, "ENFORCE_MEASURED_SNOW_HEIGHTS", "Snowpack") ), snow_redistribution( get_redistribution(cfg) ), snow_erosion( get_bool(cfg, "SNOW_EROSION", "SnowpackAdvanced") ), alpine3d( get_bool(cfg, "ALPINE3D", "SnowpackAdvanced") ), + sn_dt( get_sn_dt(cfg) ) {} + /** * @brief Computes the local mass flux of snow * @bug Contribution from suspension not considered yet! 
@@ -81,7 +83,7 @@ SnowDrift::SnowDrift(const SnowpackConfig& cfg) : saltation(cfg), * @param angle Slope angle (deg) * @return Saltation mass flux (kg m-1 s-1) */ -double SnowDrift::compMassFlux(const ElementData& Edata, const double& ustar, const double& slope_angle) +double SnowDrift::compMassFlux(const ElementData& Edata, const double& ustar, const double& slope_angle) const { // Compute basic quantities that are needed: friction velocity, z0, threshold vw // For now assume logarithmic wind profile; TODO change this later @@ -129,7 +131,7 @@ double SnowDrift::compMassFlux(const ElementData& Edata, const double& ustar, co * @param Sdata * @param forced_massErode if greater than 0, force the eroded mass to the given value (instead of computing it) */ -void SnowDrift::compSnowDrift(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata, double& forced_massErode) +void SnowDrift::compSnowDrift(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata, double& forced_massErode) const { size_t nE = Xdata.getNumberOfElements(); vector& NDS = Xdata.Ndata; diff --git a/third_party/snowpack/SnowDrift.h b/third_party/snowpack/SnowDrift.h index 618ce53b..2d6d26b7 100644 --- a/third_party/snowpack/SnowDrift.h +++ b/third_party/snowpack/SnowDrift.h @@ -23,8 +23,8 @@ #include -#include "DataClasses.h" #include "Saltation.h" +#include "DataClasses.h" #include "SnowpackConfig.h" #include @@ -40,18 +40,17 @@ class SnowDrift { public: SnowDrift(const SnowpackConfig& i_cfg); - void compSnowDrift(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata, double& cumu_psum); + void compSnowDrift(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata, double& cumu_psum) const; static const double schmidt_drift_fudge; private: - double compMassFlux(const ElementData& Edata, const double& ustar, const double& slope_angle); + double compMassFlux(const ElementData& Edata, const double& ustar, const double& slope_angle) const; - Saltation 
saltation; // The saltation model used - bool enforce_measured_snow_heights, snow_redistribution, snow_erosion; // Will be read from cfg object - bool alpine3d; ///< triggers various tricks for Alpine3D (including reducing the number of warnings) - double sn_dt; //Calculation time step in seconds as derived from CALCULATION_STEP_LENGTH - int nSlopes; + const Saltation saltation; // The saltation model used + const bool enforce_measured_snow_heights, snow_redistribution, snow_erosion; // Will be read from cfg object + const bool alpine3d; ///< triggers various tricks for Alpine3D (including reducing the number of warnings) + const double sn_dt; //Calculation time step in seconds as derived from CALCULATION_STEP_LENGTH static const bool msg_erosion; }; //End class SnowDrift diff --git a/third_party/snowpack/SnowpackConfig.cc b/third_party/snowpack/SnowpackConfig.cc index 631cefc6..56fc2c56 100644 --- a/third_party/snowpack/SnowpackConfig.cc +++ b/third_party/snowpack/SnowpackConfig.cc @@ -26,23 +26,31 @@ using namespace std; /************************************************************ * static section * ************************************************************/ +map SnowpackConfig::snowpackConfig; map SnowpackConfig::advancedConfig; map SnowpackConfig::inputConfig; map SnowpackConfig::outputConfig; +map SnowpackConfig::TechSnowConfig; const bool SnowpackConfig::__init = SnowpackConfig::initStaticData(); bool SnowpackConfig::initStaticData() { + //[Snowpack] section + advancedConfig["SOIL_FLUX"] = "false"; + //[SnowpackAdvanced] section advancedConfig["ADVECTIVE_HEAT"] = "false"; - advancedConfig["ALPINE3D"] = "false"; advancedConfig["ALLOW_ADAPTIVE_TIMESTEPPING"] = "true"; + advancedConfig["ALPINE3D"] = "false"; + advancedConfig["ALPINE3D_PTS"] = "false"; advancedConfig["DETECT_GRASS"] = "false"; advancedConfig["ALBEDO_FIXEDVALUE"] = "-999."; advancedConfig["ALBEDO_PARAMETERIZATION"] = "LEHNING_2"; advancedConfig["ALBEDO_AVERAGE_SCHMUCKI"] = "ALL_DATA"; 
advancedConfig["ALBEDO_AGING"] = "true"; + advancedConfig["COUPLEDPHASECHANGES"] = "false"; + advancedConfig["ENABLE_VAPOUR_TRANSPORT"] = "false"; advancedConfig["FIXED_POSITIONS"] = ""; advancedConfig["FORCE_RH_WATER"] = "true"; advancedConfig["HARDNESS_PARAMETERIZATION"] = "MONTI"; @@ -92,34 +100,49 @@ bool SnowpackConfig::initStaticData() advancedConfig["WATER_LAYER"] = "false"; advancedConfig["WATERTRANSPORTMODEL_SNOW"]="BUCKET"; advancedConfig["WATERTRANSPORTMODEL_SOIL"]="BUCKET"; - advancedConfig["LB_COND_WATERFLUX"]="FREEDRAINAGE"; // Only for use with RE. - advancedConfig["AVG_METHOD_HYDRAULIC_CONDUCTIVITY"]="ARITHMETICMEAN"; // Only for use with RE. + advancedConfig["LB_COND_WATERFLUX"]="FREEDRAINAGE"; // Only for use with RE. + advancedConfig["AVG_METHOD_HYDRAULIC_CONDUCTIVITY"]="ARITHMETICMEAN"; // Only for use with RE. + advancedConfig["HYDRAULIC_CONDUCTIVITY_FROZEN_SOIL"]="IGNORE"; // Only for use with RE. + advancedConfig["PREF_FLOW" ] = "false"; // Only for use with RE. + advancedConfig["PREF_FLOW_PARAM_TH"] = "0.1"; // Only for use with RE and preferential flow. + advancedConfig["PREF_FLOW_PARAM_N"] = "0.0"; // Only for use with RE and preferential flow. + advancedConfig["PREF_FLOW_PARAM_HETEROGENEITY_FACTOR"] = "1.0"; // Only for use with RE and preferential flow. + advancedConfig["PREF_FLOW_RAIN_INPUT_DOMAIN" ] = "MATRIX"; // Only for use with RE. + advancedConfig["ICE_RESERVOIR" ] = "false"; // Only for use with RE and preferential flow. + advancedConfig["REQ_INITIALIZE_SOIL" ] = "false"; // Only for use with RE. 
advancedConfig["ADJUST_HEIGHT_OF_METEO_VALUES"] = "true"; advancedConfig["ADJUST_HEIGHT_OF_WIND_VALUE"] = "true"; advancedConfig["WIND_SCALING_FACTOR"] = "1.0"; - advancedConfig["ADVECTIVE_HEAT"] = "0.0"; + advancedConfig["ADVECTIVE_HEAT"] = "false"; advancedConfig["HEAT_BEGIN"] = "0.0"; advancedConfig["HEAT_END"] = "0.0"; advancedConfig["TWO_LAYER_CANOPY"] = "true"; advancedConfig["CANOPY_HEAT_MASS"] = "true"; advancedConfig["CANOPY_TRANSMISSION"] = "true"; advancedConfig["FORESTFLOOR_ALB"] = "true"; + advancedConfig["SOIL_EVAP_MODEL"] = "EVAP_RESISTANCE"; + advancedConfig["SOIL_THERMAL_CONDUCTIVITY"] = "FITTED"; + //temporary keys for Stability until we decide for a permanent solution advancedConfig["MULTI_LAYER_SK38"] = "false"; advancedConfig["SSI_IS_RTA"] = "false"; + // followings are for input + advancedConfig["RIME_INDEX"] = "false"; + advancedConfig["NEWSNOW_LWC"] = "false"; + advancedConfig["READ_DSM"] = "false"; + //[Input] section inputConfig["METEOPATH"] = "./input"; inputConfig["NUMBER_OF_SOLUTES"] = "0"; inputConfig["SNOW"] = "SMET"; inputConfig["SOLUTE_NAMES"] = "NITRATE"; - inputConfig["ISWR_IS_NET"] = "false"; //[Output] section outputConfig["AGGREGATE_PRO"] = "false"; outputConfig["AGGREGATE_PRF"] = "false"; outputConfig["AVGSUM_TIME_SERIES"] = "true"; - outputConfig["BACKUP_DAYS_BETWEEN"] = "365."; + outputConfig["SNOW_DAYS_BETWEEN"] = "365."; outputConfig["CLASSIFY_PROFILE"] = "false"; outputConfig["CUMSUM_MASS"] = "false"; outputConfig["EXPERIMENT"] = "NO_EXP"; @@ -142,12 +165,24 @@ bool SnowpackConfig::initStaticData() outputConfig["PROF_FORMAT"] = "PRO"; outputConfig["PROF_DAYS_BETWEEN"] = "1"; outputConfig["PROF_START"] = "0"; + outputConfig["PROF_ID_OR_MK"] = "ID"; + outputConfig["PROF_AGE_OR_DATE"] = "AGE"; + outputConfig["SNOW_WRITE"] = "true"; outputConfig["SNOW"] = "SMET"; + outputConfig["HAZ_WRITE"] = "true"; outputConfig["TS_FORMAT"] = "MET"; outputConfig["TS_DAYS_BETWEEN"] = "1"; outputConfig["TS_START"] = "0"; + 
outputConfig["ACDD_WRITE"] = "false"; outputConfig["WRITE_PROCESSED_METEO"] = "false"; + TechSnowConfig["SNOW_GROOMING"] = "false"; + TechSnowConfig["GROOMING_WEEK_START"] = "40"; + TechSnowConfig["GROOMING_WEEK_END"] = "17"; + TechSnowConfig["GROOMING_HOUR"] = "21"; + TechSnowConfig["GROOMING_DEPTH_START"] = "0.4"; + TechSnowConfig["GROOMING_DEPTH_IMPACT"] = "0.4"; + return true; } @@ -170,12 +205,22 @@ SnowpackConfig::SnowpackConfig(const std::string& i_filename) : Config(i_filenam void SnowpackConfig::setDefaults() { //BUG we have a problem here: we try to keep the user settings if present. But we can not anymore make the difference between // default values and user set values... The whole "if xxx.empty()" does not work anymore! - string variant; getValue("VARIANT", "SnowpackAdvanced", variant); + string variant; getValue("VARIANT", "SnowpackAdvanced", variant, IOUtils::nothrow); getValue("ENFORCE_MEASURED_SNOW_HEIGHTS", "Snowpack", enforce_measured_snow_heights); + string albedo_model; getValue("ALBEDO_MODEL", "SnowpackAdvanced", albedo_model, IOUtils::nothrow); + string hn_density; getValue("HN_DENSITY", "SnowpackAdvanced", hn_density, IOUtils::nothrow); + string hn_density_parameterization; getValue("HN_DENSITY_PARAMETERIZATION", "SnowpackAdvanced", hn_density_parameterization, IOUtils::nothrow); + string metamorphism_model; getValue("METAMORPHISM_MODEL", "SnowpackAdvanced", metamorphism_model, IOUtils::nothrow); + string strength_model; getValue("STRENGTH_MODEL", "SnowpackAdvanced", strength_model, IOUtils::nothrow); + string viscosity_model; getValue("VISCOSITY_MODEL", "SnowpackAdvanced", viscosity_model, IOUtils::nothrow); + string watertransportmodel_snow; getValue("WATERTRANSPORTMODEL_SNOW", "SnowpackAdvanced", watertransportmodel_snow, IOUtils::nothrow); + string watertransportmodel_soil; getValue("WATERTRANSPORTMODEL_SOIL", "SnowpackAdvanced", watertransportmodel_soil, IOUtils::nothrow); + string lb_cond_waterflux; getValue("LB_COND_WATERFLUX", 
"SnowpackAdvanced", lb_cond_waterflux, IOUtils::nothrow); string s_minimum_l_element; getValue("MINIMUM_L_ELEMENT", "SnowpackAdvanced", s_minimum_l_element, IOUtils::nothrow); string s_height_new_elem; getValue("HEIGHT_NEW_ELEM", "SnowpackAdvanced", s_height_new_elem, IOUtils::nothrow); + if (s_minimum_l_element.empty()) addKey("MINIMUM_L_ELEMENT", "SnowpackAdvanced", advancedConfig["MINIMUM_L_ELEMENT"]); double minimum_l_element = get("MINIMUM_L_ELEMENT", "SnowpackAdvanced"); @@ -190,66 +235,65 @@ void SnowpackConfig::setDefaults() } } - string albedo_model; getValue("ALBEDO_MODEL", "SnowpackAdvanced", albedo_model, IOUtils::nothrow); - string hn_density; getValue("HN_DENSITY", "SnowpackAdvanced", hn_density, IOUtils::nothrow); - string hn_density_parameterization; getValue("HN_DENSITY_PARAMETERIZATION", "SnowpackAdvanced", hn_density_parameterization, IOUtils::nothrow); - string metamorphism_model; getValue("METAMORPHISM_MODEL", "SnowpackAdvanced", metamorphism_model, IOUtils::nothrow); - string strength_model; getValue("STRENGTH_MODEL", "SnowpackAdvanced", strength_model, IOUtils::nothrow); - string viscosity_model; getValue("VISCOSITY_MODEL", "SnowpackAdvanced", viscosity_model, IOUtils::nothrow); - string watertransportmodel_snow; getValue("WATERTRANSPORTMODEL_SNOW", "SnowpackAdvanced", watertransportmodel_snow, IOUtils::nothrow); - string watertransportmodel_soil; getValue("WATERTRANSPORTMODEL_SOIL", "SnowpackAdvanced", watertransportmodel_soil, IOUtils::nothrow); - string lb_cond_waterflux; getValue("LB_COND_WATERFLUX", "SnowpackAdvanced", lb_cond_waterflux, IOUtils::nothrow); if ((variant.empty()) || (variant == "DEFAULT")) { // Use default settings } else if (variant == "JAPAN") { if (albedo_model.empty()) addKey("ALBEDO_MODEL", "SnowpackAdvanced", "NIED"); + if (hn_density_parameterization.empty()) addKey("HN_DENSITY_PARAMETERIZATION", "SnowpackAdvanced", "NIED"); if (metamorphism_model.empty()) addKey("METAMORPHISM_MODEL", "SnowpackAdvanced", "NIED"); if 
(strength_model.empty()) addKey("STRENGTH_MODEL", "SnowpackAdvanced", "NIED"); if (viscosity_model.empty()) addKey("VISCOSITY_MODEL", "SnowpackAdvanced", "KOJIMA"); if (watertransportmodel_snow.empty()) addKey("WATERTRANSPORTMODEL_SNOW", "SnowpackAdvanced", "NIED"); if (watertransportmodel_soil.empty()) addKey("WATERTRANSPORTMODEL_SOIL", "SnowpackAdvanced", "NIED"); - } else if (variant == "ANTARCTICA") { - if (hn_density.empty()) addKey("HN_DENSITY", "SnowpackAdvanced", "EVENT"); + } else if (variant == "ANTARCTICA" || variant == "POLAR") { + if (variant == "ANTARCTICA") { + if (hn_density.empty()) addKey("HN_DENSITY", "SnowpackAdvanced", "EVENT"); - addKey("MINIMUM_L_ELEMENT", "SnowpackAdvanced", "0.0001"); //Minimum element length (m) - minimum_l_element = get("MINIMUM_L_ELEMENT", "SnowpackAdvanced"); + addKey("MINIMUM_L_ELEMENT", "SnowpackAdvanced", "0.0001"); //Minimum element length (m) + minimum_l_element = get("MINIMUM_L_ELEMENT", "SnowpackAdvanced"); - string hoar_density_buried; getValue("HOAR_DENSITY_BURIED", "SnowpackAdvanced", hoar_density_buried); + if ( !enforce_measured_snow_heights) { + stringstream ss; + const double tmp = 1.1 * minimum_l_element; +// ss << tmp; + addKey("HEIGHT_NEW_ELEM", "SnowpackAdvanced", ss.str()); + } + } + string hoar_density_buried; getValue("HOAR_DENSITY_BURIED", "SnowpackAdvanced", hoar_density_buried, IOUtils::nothrow); if (hoar_density_buried.empty()) addKey("HOAR_DENSITY_BURIED", "SnowpackAdvanced", "200.0"); - string force_rh_water; getValue("FORCE_RH_WATER", "SnowpackAdvanced", force_rh_water); + string force_rh_water; getValue("FORCE_RH_WATER", "SnowpackAdvanced", force_rh_water, IOUtils::nothrow); if (force_rh_water.empty()) addKey("FORCE_RH_WATER", "SnowpackAdvanced", "false"); - string thresh_rh; getValue("THRESH_RH", "SnowpackAdvanced", thresh_rh); + string thresh_rh; getValue("THRESH_RH", "SnowpackAdvanced", thresh_rh, IOUtils::nothrow); if (thresh_rh.empty()) addKey("THRESH_RH", "SnowpackAdvanced", "0.7"); - 
if ( !enforce_measured_snow_heights) { - stringstream ss; - const double tmp = 1.1 * minimum_l_element; -// ss << tmp; - addKey("HEIGHT_NEW_ELEM", "SnowpackAdvanced", ss.str()); - } - addKey("MIN_DEPTH_SUBSURF", "SnowpackAdvanced", "0."); - addKey("T_CRAZY_MIN", "SnowpackAdvanced", "165."); - addKey("T_CRAZY_MAX", "SnowpackAdvanced", "300."); + string t_crazy_min; getValue("T_CRAZY_MIN", "SnowpackAdvanced", t_crazy_min, IOUtils::nothrow); + string t_crazy_max; getValue("T_CRAZY_MAX", "SnowpackAdvanced", t_crazy_max, IOUtils::nothrow); + // If not specified in the ini file, set "polar" limits on the crazy temperatures + if (t_crazy_min.empty()) addKey("T_CRAZY_MIN", "SnowpackAdvanced", "165."); + if (t_crazy_max.empty()) addKey("T_CRAZY_MAX", "SnowpackAdvanced", "300."); addKey("NEW_SNOW_GRAIN_SIZE", "SnowpackAdvanced", "0.2"); } else if (variant == "CALIBRATION") { if (hn_density_parameterization.empty()) addKey("HN_DENSITY_PARAMETERIZATION", "SnowpackAdvanced", "ZWART"); if (viscosity_model.empty()) addKey("VISCOSITY_MODEL", "SnowpackAdvanced", "CALIBRATION"); - string fixed_positions; getValue("FIXED_POSITIONS", "SnowpackAdvanced", fixed_positions); + string fixed_positions; getValue("FIXED_POSITIONS", "SnowpackAdvanced", fixed_positions, IOUtils::nothrow); if (fixed_positions.empty()) addKey("FIXED_POSITIONS", "SnowpackAdvanced", "5"); - string number_fixed_rates; getValue("NUMBER_FIXED_RATES", "SnowpackAdvanced", number_fixed_rates); + string number_fixed_rates; getValue("NUMBER_FIXED_RATES", "SnowpackAdvanced", number_fixed_rates, IOUtils::nothrow); if (number_fixed_rates.empty()) addKey("NUMBER_FIXED_RATES", "SnowpackAdvanced", "0"); string max_number_meas_temperatures; - getValue("MAX_NUMBER_MEAS_TEMPERATURES", "SnowpackAdvanced", max_number_meas_temperatures); + getValue("MAX_NUMBER_MEAS_TEMPERATURES", "SnowpackAdvanced", max_number_meas_temperatures, IOUtils::nothrow); if (max_number_meas_temperatures.empty()) addKey("MAX_NUMBER_MEAS_TEMPERATURES", 
"SnowpackAdvanced", "5"); - string min_depth_subsurf; getValue("MIN_DEPTH_SUBSURF", "SnowpackAdvanced", min_depth_subsurf); + string min_depth_subsurf; getValue("MIN_DEPTH_SUBSURF", "SnowpackAdvanced", min_depth_subsurf, IOUtils::nothrow); if (min_depth_subsurf.empty()) addKey("MIN_DEPTH_SUBSURF", "SnowpackAdvanced", "0.0"); + } else if (variant == "SEAICE") { + // Initializations for sea ice + if (lb_cond_waterflux.empty()) addKey("LB_COND_WATERFLUX", "SnowpackAdvanced", "SEAICEFLOODING"); } else { throw UnknownValueException("Unknown variant " + variant, AT); } @@ -258,6 +302,12 @@ void SnowpackConfig::setDefaults() * That is, loop through advancedConfig (then inputConfig & outputConfig) and check whether user has set * the parameter in the corresponding section, if not add default value */ + for(map::const_iterator it = snowpackConfig.begin(); it != snowpackConfig.end(); ++it) { + //[Snowpack] section + string value; getValue(it->first, "Snowpack", value, IOUtils::nothrow); + if (value.empty()) addKey(it->first, "Snowpack", it->second); + } + for(map::const_iterator it = advancedConfig.begin(); it != advancedConfig.end(); ++it) { //[SnowpackAdvanced] section string value; getValue(it->first, "SnowpackAdvanced", value, IOUtils::nothrow); @@ -276,6 +326,12 @@ void SnowpackConfig::setDefaults() if (value.empty()) addKey(it->first, "Output", it->second); } + for(map::const_iterator it = TechSnowConfig.begin(); it != TechSnowConfig.end(); ++it) { + //[TechSnow] section + string value; getValue(it->first, "TechSnow", value, IOUtils::nothrow); + if (value.empty()) addKey(it->first, "TechSnow", it->second); + } + /** * @brief Defines how energy and mass balance are output \n * - AVGSUM_TIME_SERIES == 1 \n @@ -313,4 +369,14 @@ void SnowpackConfig::setDefaults() * @brief Default lower boundary condition for Richards equation solver \n */ if (watertransportmodel_soil == "RICHARDSEQUATION" && lb_cond_waterflux.empty()) addKey("LB_COND_WATERFLUX", "SnowpackAdvanced", 
"FREEDRAINAGE"); + + /** + * @brief Checking the settings for hydraulic conductivity \n + */ + string tmp_avg_method_K; getValue("AVG_METHOD_HYDRAULIC_CONDUCTIVITY_PREF_FLOW", "SnowpackAdvanced", tmp_avg_method_K, IOUtils::nothrow); + if (tmp_avg_method_K.empty()) { + // If not explicitly specified, take the default one (i.e., the one for matrix flow) + getValue("AVG_METHOD_HYDRAULIC_CONDUCTIVITY", "SnowpackAdvanced", tmp_avg_method_K); + addKey("AVG_METHOD_HYDRAULIC_CONDUCTIVITY_PREF_FLOW", "SnowpackAdvanced", tmp_avg_method_K); + } } diff --git a/third_party/snowpack/SnowpackConfig.h b/third_party/snowpack/SnowpackConfig.h index c982659b..beacd953 100644 --- a/third_party/snowpack/SnowpackConfig.h +++ b/third_party/snowpack/SnowpackConfig.h @@ -35,7 +35,6 @@ class SnowpackConfig : public mio::Config { */ SnowpackConfig(const std::string& i_filename); SnowpackConfig(const mio::Config& i_cfg); - ~SnowpackConfig() {} private: void setDefaults(); @@ -43,7 +42,7 @@ class SnowpackConfig : public mio::Config { static const bool __init; /// advancedConfig, inputConfig, outputConfig; + static std::map snowpackConfig, advancedConfig, inputConfig, outputConfig, TechSnowConfig; }; #endif diff --git a/third_party/snowpack/Stability.cc b/third_party/snowpack/Stability.cc index ad814700..b05a6fe9 100644 --- a/third_party/snowpack/Stability.cc +++ b/third_party/snowpack/Stability.cc @@ -19,9 +19,9 @@ */ #include "Stability.h" -#include "Constants.h" -#include "Laws_sn.h" #include "StabilityAlgorithms.h" +#include "Laws_sn.h" +#include "Constants.h" #include "Utils.h" #include @@ -166,13 +166,13 @@ void Stability::checkStability(const CurrentMeteo& Mdata, SnowStation& Xdata) double slab_mass = 0.; // Slab mass double hi_Ei = 0.; //this is the denominator of the multi layer Young's modulus - std::vector n_lemon(nN, 0.); + std::vector n_lemon(nN, 0); size_t e = nE; while (e-- > Xdata.SoilNode) { EMS[e].hard = (mapHandHardness[hardness_parameterization])(EMS[e], 
hoar_density_buried); EMS[e].S_dr = StabilityAlgorithms::setDeformationRateIndex(EMS[e]); StabilityData STpar(Stability::psi_ref); - + //update slab properties const double hi = EMS[e].L; slab_thick += hi; //Increment slab depth @@ -231,6 +231,9 @@ void Stability::checkStability(const CurrentMeteo& Mdata, SnowStation& Xdata) "Profile classification failed! (classifyStability_SchweizerWiesinger)"); } break; + default: + prn_msg( __FILE__, __LINE__, "err", Mdata.date, + "Profile classification failed! Unknown prof. calss provided"); } if (classify_profile) { @@ -254,7 +257,7 @@ void Stability::findWeakLayer(const double& Pk, std::vector& n_l // Initialize Swl_lemon = 0; // Lemon counter double Swl_d, Swl_n, zwl_d, zwl_n, zwl_ssi, zwl_Sk38; // Temporary weak layer markers - Swl_d = Swl_n = Swl_ssi = Swl_Sk38 = INIT_STABILITY; + Swl_d = Swl_n = Swl_ssi = Swl_Sk38 = IOUtils::nodata; zwl_d = zwl_n = zwl_ssi = zwl_Sk38 = Xdata.cH; // Natural and "deformation rate" Stability Index @@ -267,12 +270,12 @@ void Stability::findWeakLayer(const double& Pk, std::vector& n_l // Slab must be thicker than Stability::ground_rough (m) for an avalanche to release. while ((e-- > Xdata.SoilNode) && ((NDS[e+1].z + NDS[e+1].u)/cos_sl > Stability::ground_rough)) { // "deformation rate" Stability Index: find minimum ... - if (Swl_d > EMS[e].S_dr) { + if (Swl_d > EMS[e].S_dr || Swl_d == IOUtils::nodata) { Swl_d = EMS[e].S_dr; zwl_d = (NDS[e].z + NDS[e+1].z + NDS[e].u + NDS[e+1].u)/2.; } // Natural Stability Index: find minimum ... 
- if ( Swl_n > NDS[e+1].S_n ) { + if ( Swl_n > NDS[e+1].S_n || Swl_n == IOUtils::nodata) { Swl_n = NDS[e+1].S_n; zwl_n = NDS[e+1].z + NDS[e+1].u; } @@ -301,7 +304,8 @@ void Stability::findWeakLayer(const double& Pk, std::vector& n_l while ((e-- > Xdata.SoilNode) && (((Xdata.cH - (NDS[e+1].z + NDS[e+1].u))/cos_sl) < (Pk + Stability::skier_depth)) && ((NDS[e+1].z + NDS[e+1].u)/cos_sl > Stability::ground_rough)) { // Skier Stability Index: find minimum OR consider number of structural instabilities in case of near equalities - if ( (Swl_ssi > NDS[e+1].ssi) || ((fabs(Swl_ssi - NDS[e+1].ssi) < 0.09) && (n_lemon[e+1] > Swl_lemon)) ) { + if ( (Swl_ssi > NDS[e+1].ssi || Swl_ssi == IOUtils::nodata) + || ((fabs(Swl_ssi - NDS[e+1].ssi) < 0.09) && (n_lemon[e+1] > Swl_lemon)) ) { Swl_ssi = NDS[e+1].ssi; zwl_ssi = NDS[e+1].z + NDS[e+1].u ; Swl_lemon = n_lemon[e+1]; diff --git a/third_party/snowpack/Stability.h b/third_party/snowpack/Stability.h index c828a351..d24e031d 100644 --- a/third_party/snowpack/Stability.h +++ b/third_party/snowpack/Stability.h @@ -20,8 +20,8 @@ #ifndef STABILITY_H #define STABILITY_H -#include "DataClasses.h" #include "StabilityAlgorithms.h" +#include "DataClasses.h" #include #include diff --git a/third_party/snowpack/StabilityAlgorithms.cc b/third_party/snowpack/StabilityAlgorithms.cc index 44e0d2c3..94a1726a 100644 --- a/third_party/snowpack/StabilityAlgorithms.cc +++ b/third_party/snowpack/StabilityAlgorithms.cc @@ -19,10 +19,10 @@ */ #include "StabilityAlgorithms.h" -#include "Constants.h" #include "Laws_sn.h" -#include "Stability.h" +#include "Constants.h" #include "Utils.h" +#include "Stability.h" #include @@ -435,11 +435,11 @@ double StabilityAlgorithms::getHandHardnessASARC(const ElementData& Edata, const */ double StabilityAlgorithms::compCriticalStress(const double& epsNeckDot, const double& Ts) { - const double sigBrittle=1.e7; // Brittle fracture stress of ice (Pa) - const double C1=-6.6249; // Constant - const double C2=6.0780e-2; // 
Constant - const double C3=-1.3380e-4; // Constant - const double P1=70.000; // Constant (Pa) + static const double sigBrittle=1.e7; // Brittle fracture stress of ice (Pa) + static const double C1=-6.6249; // Constant + static const double C2=6.0780e-2; // Constant + static const double C3=-1.3380e-4; // Constant + static const double P1=70.000; // Constant (Pa) // Find the rate dependent friction angle phi const double phi = P1*pow(fabs(epsNeckDot), 0.23)*mio::Cst::to_rad; // Function of strain rate dependent failure surface @@ -474,10 +474,10 @@ double StabilityAlgorithms::setDeformationRateIndex(ElementData& Edata) return(0.1); } - const double eps1Dot = 1.76e-7; // Unit strain rate (at stress = 1 MPa) (s-1) - const double sig1 = 0.5e6; // Unit stress from Sinha's formulation (Pa) + static const double eps1Dot = 1.76e-7; // Unit strain rate (at stress = 1 MPa) (s-1) + static const double sig1 = 0.5e6; // Unit stress from Sinha's formulation (Pa) const double sig = -Edata.C; // Overburden stress, that is, absolute value of Cauchy stress (Pa) - const double Te = std::min(Edata.Te, Edata.melting_tk); // Element temperature (K) + const double Te = std::min(Edata.Te, Edata.meltfreeze_tk); // Element temperature (K) // First find the absolute neck stress const double sigNeck = Edata.neckStressEnhancement() * (sig); // Neck stress (Pa) @@ -494,7 +494,7 @@ double StabilityAlgorithms::setDeformationRateIndex(ElementData& Edata) */ double StabilityAlgorithms::compPenetrationDepth(const SnowStation& Xdata) { - double rho_Pk = Constants::eps2, dz_Pk = Constants::eps2; // Penetration depth Pk, from mean slab density + double rho_Pk = 0., dz_Pk = 0.; // Penetration depth Pk, from mean slab density double top_crust = 0., thick_crust = 0.; // Crust properties bool crust = false; // Checks for crust size_t e_crust = Constants::stundefined; @@ -527,6 +527,9 @@ double StabilityAlgorithms::compPenetrationDepth(const SnowStation& Xdata) } } } + + if (dz_Pk == 0.) 
return IOUtils::nodata; + rho_Pk /= dz_Pk; //weighted average density of the snow slab penetrated by the skier // NOTE Pre-factor 0.8 introduced May 2006 by S. Bellaire @@ -585,17 +588,21 @@ double StabilityAlgorithms::getLayerSkierStability(const double& Pk, const doubl } } -bool StabilityAlgorithms::normalizeLemon(std::vector& vecData) +bool StabilityAlgorithms::normalizeVector(std::vector& vecData) { - if (vecData.empty()) return false; - const double mean = mio::Interpol1D::arithmeticMean( vecData ); - const double std_dev = mio::Interpol1D::std_dev( vecData ); - if (std_dev==IOUtils::nodata || std_dev==0.) return false; - - for (size_t ii=0; ii& NDS = Xdata.Ndata; vector& EMS = Xdata.Edata; const size_t nE = EMS.size(); - size_t e = nE; const double cos_sl = Xdata.cos_sl; - const double hs_top = (NDS[e].z+NDS[e].u - NDS[Xdata.SoilNode].z) / cos_sl; + const double hs_top = (NDS[nE].z+NDS[nE].u - NDS[Xdata.SoilNode].z) / cos_sl; std::vector vecRG, vecRG_diff, vecHard, vecHard_diff, vecTypes; std::vector weibull, crust_index; double crust_coeff = 0.; + size_t e = nE-1; + NDS[ nE ].ssi = 0.; //the top node is assumed perfectly stable while (e-- > Xdata.SoilNode) { - NDS[ e ].ssi = 0.; //initialize with 0 so layers that can not get computed don't t in the way + NDS[ e ].ssi = 0.; //initialize with 0 so layers that can not get computed don't get in the way vecRG.push_back( EMS[e].rg ); - vecRG_diff.push_back( fabs(EMS[e+1].rg - EMS[e].rg) ); - vecHard.push_back( EMS[e].hard ); - vecHard_diff.push_back( fabs(EMS[e+1].hard - EMS[e].hard) ); + vecHard.push_back( EMS[e].hard ); + if (e > 0) { + vecRG_diff.push_back(std::max(EMS[e+1].hard>EMS[e].hard ? fabs(EMS[e+1].rg - EMS[e].rg) : 0, + EMS[e-1].hard>EMS[e].hard ? fabs(EMS[e-1].rg - EMS[e].rg) : 0 + )); + vecHard_diff.push_back(std::max(EMS[e+1].hard>EMS[e].hard ? fabs(EMS[e+1].hard - EMS[e].hard) : 0, + EMS[e-1].hard>=EMS[e].hard ? 
fabs(EMS[e-1].hard - EMS[e].hard) : 0 + )); + } else { + vecRG_diff.push_back(EMS[e+1].hard>EMS[e].hard ? fabs(EMS[e+1].rg - EMS[e].rg) : 0); + vecHard_diff.push_back(EMS[e+1].hard>EMS[e].hard ? fabs(EMS[e+1].hard - EMS[e].hard) : 0); + } //grain types receive a score depending on their primary and secondary forms const unsigned short int primary = static_cast( EMS[e].type / 100 %100 ); @@ -642,61 +659,61 @@ bool StabilityAlgorithms::getRelativeThresholdSum(SnowStation& Xdata) const double layer_depth = hs_top - (NDS[e].z+NDS[e].u - NDS[Xdata.SoilNode].z)/cos_sl; static const double w1 = 2.5; static const double w2 = 50.; - const double weibull_depth = (w1/w2) * pow(layer_depth, w1-1.) * exp( -1*pow(layer_depth/w2, w1) ); + const double weibull_depth = (w1/w2) * pow((layer_depth*100), w1-1.) * exp( -1*pow((layer_depth*100)/w2, w1) ); //compute crust factor - const bool crust_cond = (EMS[e].L>=1. && EMS[e].hard>=3 ); - const double crust_value = (crust_cond)? exp( -(hs_top - (NDS[e+1].z+NDS[e+1].u - NDS[Xdata.SoilNode].z)/cos_sl/20. ) ) : 0.; + const bool crust_cond = (EMS[e].L>=0.01 && EMS[e].hard>=3 ); + const double crust_value = (crust_cond)? exp( -((hs_top*100) - ((NDS[e+1].z+NDS[e+1].u - NDS[Xdata.SoilNode].z)*100)/cos_sl)/20. ) : 0.; crust_coeff += crust_value; - weibull.push_back( weibull_depth - crust_coeff ); //store the weibull corrected for the crust coefficient + const double crust_contrib = weibull_depth - crust_coeff; + if (crust_contrib>0) + weibull.push_back( crust_contrib ); //store the weibull corrected for the crust coefficient + else + weibull.push_back( 0. 
); //the crust is so thick that there is no additional load below } //calculate the normalization parameters - if (!normalizeLemon(vecRG)) return false; + normalizeVector(vecRG); const double RG_min = mio::Interpol1D::min_element( vecRG ); const double RG_max = mio::Interpol1D::max_element( vecRG ); - if (RG_min==RG_max) return false; - if (!normalizeLemon(vecRG_diff)) return false; + normalizeVector(vecRG_diff); const double RG_diff_min = mio::Interpol1D::min_element( vecRG_diff ); const double RG_diff_max = mio::Interpol1D::max_element( vecRG_diff ); - if (RG_diff_min==RG_diff_max) return false; - if (!normalizeLemon(vecHard)) return false; + normalizeVector(vecHard); const double hard_min = mio::Interpol1D::min_element( vecHard ); const double hard_max = mio::Interpol1D::max_element( vecHard ); - if (hard_min==hard_max) return false; - if (!normalizeLemon(vecHard_diff)) return false; + normalizeVector(vecHard_diff); const double hard_diff_min = mio::Interpol1D::min_element( vecHard_diff ); const double hard_diff_max = mio::Interpol1D::max_element( vecHard_diff ); - if (hard_diff_min==hard_diff_max) return false; - if (!normalizeLemon(vecTypes)) return false; + normalizeVector(vecTypes); const double type_min = mio::Interpol1D::min_element( vecTypes ); const double type_max = mio::Interpol1D::max_element( vecTypes ); - if (type_min==type_max) return false; - + + const double dp_min = mio::Interpol1D::min_element( weibull ); const double dp_max = mio::Interpol1D::max_element( weibull ); - if (dp_min==dp_max) return false; vector index; double max_index = 0.; for (size_t ii=0; iimax_index) max_index = index.back(); } + if (max_index==0.) max_index=1.; //in this case, all index()==0 so we will set all ssi to zero for (size_t ii=0; ii. -*/ -#ifndef STABILITYALGORITHMS_H -#define STABILITYALGORITHMS_H - -#include "DataClasses.h" - -/** - * @class StabilityData - * @brief Layer shear strength evaluation parameters. 
- * This class contains layer properties useful for the shear strength evaluation. - * - * @ingroup data_structures - */ -class StabilityData { - public: - /** @brief StabilityData constructor. - * @param i_psi_ref slope angle to use for the stability evaluation (in degrees) - * @note alpha_max(38.) = 54.3 deg (J. Schweizer, IB 712, SLF) - */ - StabilityData(const double& i_psi_ref) : Sig_c2(Constants::undefined), strength_upper(1001.), phi(0.0), - sig_n(Constants::undefined), sig_s(Constants::undefined), - alpha_max_rad(54.3*mio::Cst::to_rad), psi_ref(i_psi_ref*mio::Cst::to_rad), cos_psi_ref(cos(i_psi_ref*mio::Cst::to_rad)), sin_psi_ref(sin(i_psi_ref*mio::Cst::to_rad)) {} - - double Sig_c2; ///< Element shear strength (kPa) - double strength_upper; ///< Shear strength of adjacent upper element - double phi; ///< Correction to normal load - double sig_n; ///< Normal load on upper element node, perpendicular to slope - double sig_s; ///< Shear stress on upper element node, parallel to slope - double alpha_max_rad; ///< Angle from snow surface to peak shear stress, 54.3 at 38 deg - double psi_ref; ///< Reference slope angle in radian, corresponds usually to 38 deg - double cos_psi_ref; ///< Cosine of psi_ref - double sin_psi_ref; ///< Sine of psi_ref -}; - -/** @brief Implementations of various algorithms useful for evaluating the stability. - * These algorithms fall within the following categories: - * + the structural stability, that selects the potential weak layer, for example the SK38, SSI or Relative Threshold; - * + the stability index, based on a strength/stress relation, such as the natural stability index. This evaluates the stability of a selected layer; - * + the critical crack length that also evaluates the stability of a selected layer, for example the critical cut length or the anti-crack model. - * - * These methods will then be used by the Stability class to effectively compute the stability for a given profile, according to the user's configuration. 
- * @ingroup postprocessing - */ -class StabilityAlgorithms { - public: - static void classifyStability_SchweizerBellaire(const double& Swl_ssi, const double& Swl_Sk38, SnowStation& Xdata); - static void classifyStability_Bellaire(const double& Swl_ssi, SnowStation& Xdata); - static void classifyStability_SchweizerBellaire2(const double& Swl_ssi, const size_t& Swl_lemon, const double& Swl_Sk38, SnowStation& Xdata); - static bool classifyStability_SchweizerWiesinger(SnowStation& Xdata); - static bool classifyType_SchweizerLuetschg(SnowStation& Xdata); - - static bool setShearStrengthDEFAULT(const double& cH, const double& cos_sl, const mio::Date& date, - ElementData& Edata, NodeData& Ndata, StabilityData& STpar); - static bool setShearStrength_NIED(const double& cH, const double& cos_sl, const mio::Date& date, - ElementData& Edata, NodeData& Ndata, StabilityData& STpar); - - static double getHandHardnessBELLAIRE(const ElementData& Edata, const double& buried_hoar_density); - static double getHandHardnessASARC(const ElementData& Edata, const double& buried_hoar_density); - static double getHandHardnessMONTI(const ElementData& Edata, const double& buried_hoar_density); - - static double getHandHardnessMONTI(const int& F, const double& rho, const double& water_content, const double& buried_hoar_density); - static double compCriticalStress(const double& epDotn, const double& T_s); - static double setDeformationRateIndex(ElementData& Edata); - static double compPenetrationDepth(const SnowStation& Xdata); - static void compReducedStresses(const double& stress, const double& cos_sl, StabilityData& STpar); - - static double getNaturalStability(const StabilityData& STpar); - static double getLayerSkierStability(const double& penetrationDepth, const double& depth_lay, const StabilityData& STpar); - static bool getRelativeThresholdSum(SnowStation& Xdata); - - static double CriticalCutLength(const double& H_slab, const double& rho_slab, const double& cos_sl, const ElementData& 
Edata, const StabilityData& STpar, const double& stress); - private: - static bool normalizeLemon(std::vector& vecData); -}; - -#endif +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND +*/ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . +*/ +#ifndef STABILITYALGORITHMS_H +#define STABILITYALGORITHMS_H + +#include "DataClasses.h" + +/** + * @class StabilityData + * @brief Layer shear strength evaluation parameters. + * This class contains layer properties useful for the shear strength evaluation. + * + * @ingroup data_structures + */ +class StabilityData { + public: + /** @brief StabilityData constructor. + * @param i_psi_ref slope angle to use for the stability evaluation (in degrees) + * @note alpha_max(38.) = 54.3 deg (J. 
Schweizer, IB 712, SLF) + */ + StabilityData(const double& i_psi_ref) : Sig_c2(Constants::undefined), strength_upper(1001.), phi(0.0), + sig_n(Constants::undefined), sig_s(Constants::undefined), + alpha_max_rad(54.3*mio::Cst::to_rad), psi_ref(i_psi_ref*mio::Cst::to_rad), cos_psi_ref(cos(i_psi_ref*mio::Cst::to_rad)), sin_psi_ref(sin(i_psi_ref*mio::Cst::to_rad)) {} + + double Sig_c2; ///< Element shear strength (kPa) + double strength_upper; ///< Shear strength of adjacent upper element + double phi; ///< Correction to normal load + double sig_n; ///< Normal load on upper element node, perpendicular to slope + double sig_s; ///< Shear stress on upper element node, parallel to slope + double alpha_max_rad; ///< Angle from snow surface to peak shear stress, 54.3 at 38 deg + double psi_ref; ///< Reference slope angle in radian, corresponds usually to 38 deg + double cos_psi_ref; ///< Cosine of psi_ref + double sin_psi_ref; ///< Sine of psi_ref +}; + +/** @brief Implementations of various algorithms useful for evaluating the stability. + * These algorithms fall within the following categories: + * + the structural stability, that selects the potential weak layer, for example the SK38, SSI or Relative Threshold; + * + the stability index, based on a strength/stress relation, such as the natural stability index. This evaluates the stability of a selected layer; + * + the critical crack length that also evaluates the stability of a selected layer, for example the critical cut length or the anti-crack model. + * + * These methods will then be used by the Stability class to effectively compute the stability for a given profile, according to the user's configuration. 
+ * @ingroup postprocessing + */ +class StabilityAlgorithms { + public: + static void classifyStability_SchweizerBellaire(const double& Swl_ssi, const double& Swl_Sk38, SnowStation& Xdata); + static void classifyStability_Bellaire(const double& Swl_ssi, SnowStation& Xdata); + static void classifyStability_SchweizerBellaire2(const double& Swl_ssi, const size_t& Swl_lemon, const double& Swl_Sk38, SnowStation& Xdata); + static bool classifyStability_SchweizerWiesinger(SnowStation& Xdata); + static bool classifyType_SchweizerLuetschg(SnowStation& Xdata); + + static bool setShearStrengthDEFAULT(const double& cH, const double& cos_sl, const mio::Date& date, + ElementData& Edata, NodeData& Ndata, StabilityData& STpar); + static bool setShearStrength_NIED(const double& cH, const double& cos_sl, const mio::Date& date, + ElementData& Edata, NodeData& Ndata, StabilityData& STpar); + + static double getHandHardnessBELLAIRE(const ElementData& Edata, const double& buried_hoar_density); + static double getHandHardnessASARC(const ElementData& Edata, const double& buried_hoar_density); + static double getHandHardnessMONTI(const ElementData& Edata, const double& buried_hoar_density); + + static double getHandHardnessMONTI(const int& F, const double& rho, const double& water_content, const double& buried_hoar_density); + static double compCriticalStress(const double& epDotn, const double& T_s); + static double setDeformationRateIndex(ElementData& Edata); + static double compPenetrationDepth(const SnowStation& Xdata); + static void compReducedStresses(const double& stress, const double& cos_sl, StabilityData& STpar); + + static double getNaturalStability(const StabilityData& STpar); + static double getLayerSkierStability(const double& penetrationDepth, const double& depth_lay, const StabilityData& STpar); + static bool getRelativeThresholdSum(SnowStation& Xdata); + + static double CriticalCutLength(const double& H_slab, const double& rho_slab, const double& cos_sl, const ElementData& 
Edata, const StabilityData& STpar, const double& stress); + private: + static bool normalizeVector(std::vector& vecData); +}; + +#endif diff --git a/third_party/snowpack/TechnicalSnow.cc b/third_party/snowpack/TechnicalSnow.cc new file mode 100644 index 00000000..ea59c532 --- /dev/null +++ b/third_party/snowpack/TechnicalSnow.cc @@ -0,0 +1,172 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND + */ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . +*/ +/** + * @file TechnicalSnow.cc + * @author Mathias Bavay, Pirmin Ebner and others + * @brief Implementation of technical snow production and grooming + */ + +#include "TechnicalSnow.h" +#include "Utils.h" + +#include + +using namespace std; +using namespace mio; + +/** + * @page technical_snow Technical snow + * The technical snow module contains everything that is required to simulate the technical snow management as + * performed in ski resorts. This includes both snow grooming and + * technical snow production. + * In order to keep the configuration quite simple and to reduce the need for detailed operational data, the grooming operations are simplified: it + * is assumed that snow grooming is performed between a specific calendar week, everyday at a specific time and ends at another given calendar week. 
In contrast, + * there is more flexibility regarding snow production: it is triggered when the forcing data contains positive values in the psum_tech field. + * + * @section snow_grooming Snow grooming + * The densification and mixing of the upper layers of the snow are simulated by the snow grooming module (see TechSnow::preparation). it is configured with the following keys: + * - GROOMING_WEEK_START: calendar week when to start grooming operations; + * - GROOMING_WEEK_END: calendar week when to end grooming operations; + * - GROOMING_HOUR: at what time of the day is snow grooming performed; + * - GROOMING_DEPTH_START: minimum snow height necessary for groooming (if there is less than GROOMING_DEPTH_START, grooming will be skipped); + * - GROOMING_DEPTH_IMPACT: maximum grooming depth on the snow. Snow layers deeper than GROOMING_DEPTH_IMPACT are left unchanged. + * + * @section snow_production Snow production + * When a meteorological forcing named psum_tech has a positive value, snow production will be triggered. The mass of produced snow is given by psum_tech + * while other properties (such as snow temperature and liquid water content as well as the partition between solid and liquid precipitation) are computed based + * on the local . Average values of water losses due to wind and + * evaporation are assumed together with an average wind speed suitable for snow production (this means that the true, local wind speed is not used, instead + * it is assumed that the snow production has been triggered because the wind speed was not too high). + * + * More details are given in TechSnow::productionPpt. 
+ * + */ + +TechSnow::TechSnow(const SnowpackConfig& cfg) + : grooming_week_start(cfg.get("GROOMING_WEEK_START", "TechSnow")), grooming_week_end(cfg.get("GROOMING_WEEK_END", "TechSnow")), + grooming_hour(cfg.get("GROOMING_HOUR", "TechSnow")), min_depth( cfg.get("GROOMING_DEPTH_START", "TechSnow")), + max_depth( cfg.get("GROOMING_DEPTH_IMPACT", "TechSnow")) +{} + +/** + * @brief Defined time when the slope preparation happens (default: 9:00PM) and only for the winter season (default: week < 17 && week > 46). + * @details Returns true if snow should be prepared + * @param[in] current_date current date + * @return true if the snow should be prepared, false otherwise + */ +bool TechSnow::prepare(const mio::Date& current_date) const +{ + const unsigned short iso_week = current_date.getISOWeekNr(); + + if (iso_week>grooming_week_end && iso_week"Pistenpräparation und Pistenpflege. Das Handbuch für den Praktiker.", Davos: WSL-Institut für Schnee-und Lawineforschung SLF, (2018). + * @param[in] Mdata Meteorological data + * @param[in] cumu_precip cumulated amount of precipitation (kg m-2) + * @param[out] Tw technical snow temperature (K) + * @param[out] rho_hn technical snow density (kg/m3) + * @param[out] delta_cH new snow fall height (m) + * @param[out] theta_w liquid water fraction of the new elements to be created (1) + */ +void TechSnow::productionPpt(const CurrentMeteo& Mdata, const double& cumu_precip, double &Tw, double &rho_hn, double &delta_cH, double &theta_w) +{ + static const double rho_w = 999.9; // (kg/m3) Density water (kg/m3) @ 1-4°C + static const double tech_lost = 0.15; // (-) Water loss due to wind, evaporation, etc + static const double T_water = 1.5; // (C) Average water temperature for the technical snow production + static const double v_wind = 1.5; // (m/s) Average wind condition for snow production + static const double V_water = 100.; // (l/min) average water supply by the snow guns + + Tw = IOUtils::K_TO_C(Mdata.ta) * atan(0.151977 * 
sqrt(Mdata.rh*100. + 8.313659)) + atan(IOUtils::K_TO_C(Mdata.ta) + Mdata.rh*100.) - atan(Mdata.rh*100. - 1.676331) + 0.00391838 * pow(Mdata.rh*100, 1.5) * atan(0.023101 * Mdata.rh*100) - 4.686035; // (°C) Wet-bulb temperature + rho_hn = 1.7261 * Optim::pow2(Tw) + 37.484 * Tw + 505.05; // (kg/m3) density of technical snow (kg/m3) dependent from the wet-bulb temperature + + double LWC_max = 29.76 - 11.71 * log(std::abs(Tw)) + 1.07*T_water - 1.6 * v_wind; // (%vol) liquid water content at 55 l/min + if (LWC_max < 0.) LWC_max = 0.; + + const double LWC = (0.004 * V_water + 0.52) * 100 * (rho_hn/917.) / 36.3 * LWC_max * 0.4; // (%vol) liquid water content (average value multiply by 0.4) + const double psum_snow_tech = Mdata.psum_tech * rho_w / rho_hn * (1. - tech_lost); // Technical snow production (mm) dependent from water loss and amount of provided water + const double precip_snow = psum_snow_tech + cumu_precip * (1. - Mdata.psum_ph); // (mm) + const double precip_rain = (Mdata.psum) * Mdata.psum_ph; // (mm) + + delta_cH = (precip_snow / rho_hn); // Actual enforced snow depth // (m4/kg) + theta_w = precip_rain / (delta_cH * Constants::density_water) + LWC*0.01; // (-) volume fraction of liquid water in each element + Tw = IOUtils::C_TO_K( Tw ); //convert Tw back to K +} + +/** + * @brief Perform technical snow preparation. The technical snow preparation has only an influence on the upper 40 cm (default) and started with minimum snow depth of 40 cm (default). The maximum preparation density is 450 kg/m3. + * @details The densification is done with a fit on the data found in Wolfsperger, F., H. Rhyner, and M. Schneebeli, + * <"Pistenpräparation und Pistenpflege. Das Handbuch für den Praktiker.", Davos: WSL-Institut für Schnee-und Lawineforschung SLF, (2018). 
+ * @param[in] Xdata Snow profile to prepare (grooming, etc) + */ +void TechSnow::preparation(SnowStation& Xdata) const +{ + static const double max_grooming_density = 450.; //this is the maximum value of rho_groom (see equation below) and also a realistic achievable upper value + static const double original_density_threshold = 415.; //this is EMS[e].Rho that produces the maximum value of rho_groom (see equation below) + const double snow_height = Xdata.cH - Xdata.Ground; + + if (snow_height < min_depth) return; // Grooming only if there is enough snow + + vector& NDS = Xdata.Ndata; + vector& EMS = Xdata.Edata; + const size_t nE = Xdata.getNumberOfElements(); + double depth = 0.; + for (size_t e=nE; e-- > Xdata.SoilNode; ) { + const double L0 = EMS[e].L; + depth += L0; + + if (EMS[e].Rho <= max_grooming_density) { //no grooming for snow that is already denser than what is achievable + const double rho_groom = (EMS[e].Rho > original_density_threshold)? max_grooming_density : 12.152 * sqrt(448.78 - EMS[e].Rho) + 0.9963 * EMS[e].Rho - 35.41; // Density of the groomed snow, fit done on "Pistenpräparation und Pistenpflege. 
Das Handbuch für den Praktiker.", F Wolfsperger, H Rhyner, M Schneebeli, 2018 + const double L1 = EMS[e].L * EMS[e].Rho / rho_groom; // New length of the element after grooming + EMS[e].L0 = L1; + EMS[e].L = L1; + EMS[e].Rho = rho_groom; + EMS[e].theta[WATER] *= L0 / L1; + EMS[e].theta[WATER_PREF] *= L0 / L1; + EMS[e].theta[ICE] *= L0 / L1; + EMS[e].theta_i_reservoir = 0.0; + EMS[e].theta_i_reservoir_cumul = 0.0; + EMS[e].dd = 0.; + EMS[e].sp = 1.; + EMS[e].rg = 0.2; // Have to adapt after some tests + EMS[e].rb = EMS[e].rg/3.; + NDS[e+1].z = NDS[e].z + EMS[e].L; + EMS[e].theta[AIR] = 1.0 - EMS[e].theta[WATER] - EMS[e].theta[WATER_PREF] - EMS[e].theta[ICE] - EMS[e].theta[SOIL]; + if ( !(EMS[e].theta[AIR]>=0.1) ) { + prn_msg(__FILE__, __LINE__, "err", Date(), + "Error in Slope Preparation (Densification) Volume contents: e=%d nE=%d rho=%lf ice=%lf wat=%lf wat_pref=%lf air=%le", + e, nE, EMS[e].Rho, EMS[e].theta[ICE], EMS[e].theta[WATER], EMS[e].theta[WATER_PREF], EMS[e].theta[AIR]); + throw IOException("Runtime Error in snowPreparation()", AT); + } + Xdata.cH = NDS[nE].z + NDS[nE].u; // Update computed snow depth + if (depth > max_depth) break; // Grooming has only an influence on the upper layers + } + } +} diff --git a/third_party/snowpack/TechnicalSnow.h b/third_party/snowpack/TechnicalSnow.h new file mode 100644 index 00000000..0d967e86 --- /dev/null +++ b/third_party/snowpack/TechnicalSnow.h @@ -0,0 +1,55 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND + */ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version.
+ + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . +*/ + +#ifndef TECHNICALSNOW_H +#define TECHNICALSNOW_H + +#include "DataClasses.h" + +/** + * @brief Implementation of snow grooming + * @details + * + * This module relies on the following configuration keys, all in the [TechSnow] section: + * - SNOW_GROOMING: if set to true, enables this module (default: false); + * - GROOMING_WEEK_START: ISO week number when to start grooming (default: 40) + * - GROOMING_WEEK_END: ISO week number when to stop grooming (default: 17) + * - GROOMING_HOUR: at what time should grooming be performed? (default: 21 hour) + * - GROOMING_DEPTH_START: how much snow must be on the ground to start grooming [m] (default: 0.4); + * - GROOMING_DEPTH_IMPACT: maximum depth of snow impacted by grooming [m] (default: 0.4); + * + * @author Mathias Bavay, Pirmin Ebner and others + * @ingroup postprocessing + */ + +class TechSnow { + public: + TechSnow(const SnowpackConfig& cfg); + + bool prepare(const mio::Date& current_date) const; + void preparation(SnowStation& Xdata) const; + static void productionPpt(const CurrentMeteo& Mdata, const double& cumu_precip, double &Tw, double &rho_hn, double &delta_cH, double &theta_w); + + private: + double grooming_week_start, grooming_week_end; + double grooming_hour; + double min_depth, max_depth; //minimum depth of snow for grooming, maximum depth affected by grooming +}; +#endif diff --git a/third_party/snowpack/Utils.cc b/third_party/snowpack/Utils.cc index 78aae822..5f619b83 100644 --- a/third_party/snowpack/Utils.cc +++ b/third_party/snowpack/Utils.cc @@ -50,19 +50,17 @@ std::string getLibVersion() { * - "msg-" : [i] [] \ \ * @author Charles Fierz \n Mathias Bavay * 
@version 11.02 - * @param *theFile + * @param *fileAndPath * @param theLine * @param *msg_type See above * @param date_in Use Date() if date_in is not available. * @param *format Format for message * @param ... Variable number of parameters to format */ -void prn_msg(const char *theFile, const int theLine, const char *msg_type, const mio::Date& date_in, const char *format, ...) +void prn_msg(const char *fileAndPath, const int theLine, const char *msg_type, const mio::Date& date_in, const char *format, ...) { va_list argptr; // get an arg ptr - - // Commented to remove set but not used compiler warning - // int msg_ok = 0; +// int msg_ok = 0; // Initialize argptr to point to the first argument after the format string // va_start(argptr, format); @@ -92,6 +90,18 @@ void prn_msg(const char *theFile, const int theLine, const char *msg_type, const va_start(argptr, format); vsnprintf(orig_msg, len+1, format, argptr); va_end(argptr); + //doing it pure c for performance +#if defined _WIN32 + #if !defined __CYGWIN__ + const char *delim = strrchr(fileAndPath, '\\'); + #else + const char *delim = strrchr(fileAndPath, '/'); + #endif +#else + const char *delim = strrchr(fileAndPath, '/'); +#endif + const char *theFile = delim ? 
delim + 1 : fileAndPath; + //print message //printf("¬"); //if we need multiline output, use a special char as bloc delimiter if (strcmp(msg_type, "err") == 0) { @@ -309,17 +319,17 @@ void typeToCode(int *F1, int *F2, int *F3, int type) double unitConversion(const double val, char* unitIn, char* unitOut) { if (!strcmp(unitIn,"degK") || !strcmp(unitIn,"°K") || !strcmp(unitIn,"Kelvin")) - unitIn = (char*) "K"; + unitIn = strdup("K"); if (!strcmp(unitOut,"degK") || !strcmp(unitOut,"°K") || !strcmp(unitOut,"Kelvin")) - unitOut = (char*) "K"; + unitOut = strdup("K"); if (!strcmp(unitIn,"degC") || !strcmp(unitIn,"Celsius")) - unitIn = (char*) "°C"; + unitIn = strdup("°C"); if (!strcmp(unitOut,"degC") || !strcmp(unitOut,"Celsius")) - unitOut = (char*) "°C"; + unitOut = strdup("°C"); if (!strcmp(unitIn,"degF") || !strcmp(unitIn,"Fahrenheit")) - unitIn = (char*) "°F"; + unitIn = strdup("°F"); if (!strcmp(unitOut,"degF") || !strcmp(unitOut,"Fahrenheit")) - unitOut = (char*) "°F"; + unitOut = strdup("°F"); if (!strcmp(unitIn,"°C") && !strcmp(unitOut,"K")) { return (val+273.15); @@ -376,7 +386,7 @@ double unitConversion(const double val, char* unitIn, char* unitOut) } return val*ratio; } - throw IOException("Unable to perform unit conversion.", AT); //if we don't missuse the method, this should never be reached + //NOT REACHABLE... 
throw IOException("Unable to perform unit conversion.", AT); //if we don't missuse the method, this should never be reached } /** @@ -394,7 +404,7 @@ bool massBalanceCheck(const SnowStation& Xdata, const SurfaceFluxes& Sdata, doub bool mass_error = true; double tot_mass=0., tot_swe=0., dmassE=0.; const double psum = Xdata.hn*Xdata.rho_hn; - double mass_change = psum - Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] + Sdata.mass[SurfaceFluxes::MS_RAIN] + Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] + Sdata.mass[SurfaceFluxes::MS_EVAPORATION] - std::max(0., Xdata.ErosionMass); + double mass_change = psum - Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] + Sdata.mass[SurfaceFluxes::MS_RAIN] + Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] + Sdata.mass[SurfaceFluxes::MS_EVAPORATION] + Sdata.mass[SurfaceFluxes::MS_FLOODING] - std::max(0., Xdata.ErosionMass); // Actual mass of snowpack for (size_t e=Xdata.SoilNode; e Xdata.SoilNode) && (hs + 0.01) < (Xdata.cH - Xdata.Ground) ) { massErode += Xdata.Edata[Xdata.getNumberOfElements()-1].M; Xdata.cH -= Xdata.Edata[Xdata.getNumberOfElements()-1].L; Xdata.resize(Xdata.getNumberOfElements() - 1); - // nErode++; + //nErode++; } Xdata.ErosionLevel = std::min(Xdata.getNumberOfElements()-1, Xdata.ErosionLevel); @@ -472,14 +482,17 @@ double forcedErosion(const double hs, SnowStation& Xdata) * @param Xdata * @param dhs_corr Correction on calculated snow depth (m) * @param mass_corr Mass correction (kg m-2) + * @param prn_check If set to true, output an information message when correcting for a missed erosion event or wrong settling */ -void deflateInflate(const CurrentMeteo& Mdata, SnowStation& Xdata, double& dhs_corr, double& mass_corr) +void deflateInflate(const CurrentMeteo& Mdata, SnowStation& Xdata, double& dhs_corr, double& mass_corr, const bool &prn_check) { + if (Xdata.mH == IOUtils::nodata) { + cerr << "[E] No measured snow height: cannot execute ALLOW_INFLATE!" 
<< endl; + throw; + } const size_t nE = Xdata.getNumberOfElements(), soil_node = Xdata.SoilNode; const double cH = Xdata.cH - Xdata.Ground; // Calculated snow depth const double mH = Xdata.mH - Xdata.Ground; // Enforced snow depth - //double cH_old; // Temporary snow depth - bool prn_CK = false; vector& NDS = Xdata.Ndata; vector& EMS = Xdata.Edata; @@ -492,10 +505,8 @@ void deflateInflate(const CurrentMeteo& Mdata, SnowStation& Xdata, double& dhs_c if ((mH + 0.03) < cH) { dhs_corr = mH - cH; mass_corr = forcedErosion(mH, Xdata); - if (prn_CK) { //HACK - prn_msg(__FILE__, __LINE__, "msg+", Mdata.date, "Missed erosion event detected"); - prn_msg(__FILE__, __LINE__, "msg-", Date(), "Measured Snow Depth:%lf Computed Snow Depth:%lf", - mH, cH); + if (prn_check) { + prn_msg(__FILE__, __LINE__, "msg+", Mdata.date, "Missed erosion event detected, measured_hs=%lf m computed_hs=%lf m mass_corr=%lf cm", mH, cH, mass_corr); } } else if (cH > Constants::eps) { // assume settling error double factor_corr=0., sum_total_correction=0.; @@ -504,12 +515,6 @@ void deflateInflate(const CurrentMeteo& Mdata, SnowStation& Xdata, double& dhs_c if (EMS[nE-1].depositionDate.getJulian() <= EMS[soil_node].depositionDate.getJulian()) return; - if (prn_CK) { //HACK - prn_msg(__FILE__, __LINE__, "msg+", Mdata.date, - "Small correction due to assumed settling error"); - prn_msg(__FILE__, __LINE__, "msg-", Date(), - "Enforced Snow Depth:%lf Computed Snow Depth:%lf", mH, cH); - } // Second find the normalization quantity, which we choose to be the age of the layer. dhs_corr = mH - cH; for (size_t e = soil_node; e < nE; e++) { @@ -546,9 +551,7 @@ void deflateInflate(const CurrentMeteo& Mdata, SnowStation& Xdata, double& dhs_c if (age_fraction < 0.) { age_fraction = 0.; } - ddL = EMS[e].L - * std::max(-0.9, - std::min(0.9, factor_corr * (1. - sqrt(age_fraction)))); + ddL = EMS[e].L * std::max(-0.9, std::min(0.9, factor_corr * (1. 
- sqrt(age_fraction)))); } else { ddL = 0.; } @@ -561,6 +564,10 @@ void deflateInflate(const CurrentMeteo& Mdata, SnowStation& Xdata, double& dhs_c EMS[e].L0 = EMS[e].L += ddL; EMS[e].E = EMS[e].Eps = EMS[e].dEps = EMS[e].Eps_e = EMS[e].Eps_v = EMS[e].S = 0.0; } + if (prn_check) { + prn_msg(__FILE__, __LINE__, "msg+", Mdata.date, "Correction due to assumed settling error, measured_hs=%lf m computed_hs=%lf m mass_corr=%lf cm", mH, cH, mass_corr); + } + // Update the overall height Xdata.cH = NDS[nE].z + NDS[nE].u; } else { diff --git a/third_party/snowpack/Utils.h b/third_party/snowpack/Utils.h index b8a01c79..3591da2f 100644 --- a/third_party/snowpack/Utils.h +++ b/third_party/snowpack/Utils.h @@ -52,10 +52,10 @@ std::string getLibVersion(); } #ifdef GNU //in this case, GCC can check the format arguments for types, number, ... -void prn_msg(const char *theFile, const int theLine, const char *msg_type, const mio::Date& date_in, const char *format, ...) +void prn_msg(const char *fileAndPath, const int theLine, const char *msg_type, const mio::Date& date_in, const char *format, ...) 
__attribute__ ((format (printf, 5, 6))); #else -void prn_msg(const char *theFile, const int theLine, const char *msg_type, const mio::Date& date_in, const char *format, ...); +void prn_msg(const char *fileAndPath, const int theLine, const char *msg_type, const mio::Date& date_in, const char *format, ...); #endif bool booleanTime(const double& JulianDate, double days_between, @@ -78,7 +78,7 @@ size_t findUpperNode(const double& z, const std::vector& Ndata, const double forcedErosion(const double hs, SnowStation& Xdata); -void deflateInflate(const CurrentMeteo& Mdata, SnowStation& Xdata, double& dhs_corr, double& mass_corr); +void deflateInflate(const CurrentMeteo& Mdata, SnowStation& Xdata, double& dhs_corr, double& mass_corr, const bool &prn_check); double logisticFunction(const double input, const double threshold, const double width); diff --git a/third_party/snowpack/config.dox b/third_party/snowpack/config.dox index 366d65ba..47e0f822 100644 --- a/third_party/snowpack/config.dox +++ b/third_party/snowpack/config.dox @@ -38,7 +38,7 @@ PROJECT_NAME = SNOWPACK # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = SNOWPACK-3.4.1 +PROJECT_NUMBER = $(SN_VERSION) # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a @@ -1610,7 +1610,8 @@ PAPER_TYPE = a4 # If left blank no extra packages will be included. # This tag requires that the tag GENERATE_LATEX is set to YES. -EXTRA_PACKAGES = +#EXTRA_PACKAGES = amssymb amsthm +EXTRA_PACKAGES = mathtools # The LATEX_HEADER tag can be used to specify a personal LaTeX header for the # generated LaTeX document. 
The header should contain everything until the first diff --git a/third_party/snowpack/libsnowpack.h b/third_party/snowpack/libsnowpack.h index a96e1279..750666e8 100644 --- a/third_party/snowpack/libsnowpack.h +++ b/third_party/snowpack/libsnowpack.h @@ -35,12 +35,13 @@ #include "SnowDrift.h" #include "SnowpackConfig.h" #include "Stability.h" +#include "TechnicalSnow.h" #include "Utils.h" -#include "plugins/AsciiIO.h" //for direct calls to AsciiIO -#include "plugins/SmetIO.h" //for direct calls to SmetIO #include "plugins/SnowpackIO.h" #include "plugins/SnowpackIOInterface.h" +#include "plugins/AsciiIO.h" //for direct calls to AsciiIO +#include "plugins/SmetIO.h" //for direct calls to SmetIO #include "snowpackCore/Aggregate.h" #include "snowpackCore/Canopy.h" diff --git a/third_party/snowpack/plugins/AsciiIO.cc b/third_party/snowpack/plugins/AsciiIO.cc index 4012f807..54468a03 100644 --- a/third_party/snowpack/plugins/AsciiIO.cc +++ b/third_party/snowpack/plugins/AsciiIO.cc @@ -19,13 +19,13 @@ */ #include "AsciiIO.h" +#include "../Utils.h" +#include "../snowpackCore/Canopy.h" #include "../Constants.h" #include "../Hazard.h" #include "../Laws_sn.h" -#include "../Utils.h" -#include "../snowpackCore/Aggregate.h" -#include "../snowpackCore/Canopy.h" #include "../snowpackCore/Metamorphism.h" +#include "../snowpackCore/Aggregate.h" #define MAX_STRING_LENGTH 256 @@ -75,6 +75,7 @@ const bool AsciiIO::t_gnd = false; * Tlayer temperature [K] * Vol_Frac_Ifractional ice volume [0-1] * Vol_Frac_Wfractional water volume [0-1] + * Vol_Frac_WPfractional preferential flow water volume [0-1] * Vol_Frac_Vfractional voids volume [0-1] * Vol_Frac_Sfractional soil volume [0-1] *
@@ -82,7 +83,7 @@ const bool AsciiIO::t_gnd = false; * FieldDescription * Rho_Ssoil density [kg/m3] * Conduc_Smineral phase soil thermal conductivity [w/(mK)] - * HeatCapac_Smineral phase soil thermal capacity [J/K] + * HeatCapac_Smineral phase soil thermal capacity [J/(kg*K)] * rggrain radius [mm] * rbbond radius [mm] * dddendricity [0-1] @@ -160,7 +161,10 @@ const bool AsciiIO::t_gnd = false; * 0501,nElems,height [> 0: top, < 0: bottom of elem.] (cm) * 0502,nElems,element density (kg m-3) * 0503,nElems,element temperature (degC) + * 0504,nElems,element ID -- or -- element mk (see key PROF_ID_OR_MK) + * 0505,nElems,element age (days) -- or -- element deposition date (see key PROF_AGE_OR_DATE) * 0506,nElems,liquid water content by volume (%) + * 0507,nElems,liquid preferential flow water content by volume (%) * 0508,nElems,dendricity (1) * 0509,nElems,sphericity (1) * 0510,nElems,coordination number (1) @@ -184,12 +188,18 @@ const bool AsciiIO::t_gnd = false; * 0533,nElems,stability index Sk38 * 0534,nElems,hand hardness either (N) or index steps (1) * 0535,nElems,optical equivalent grain size (mm) + * 0540,nElems,bulk salinity (g/kg) + * 0541,nElems,brine salinity (g/kg) + * 0560,nElems,potential lateral flow rate (kg/m3) * 0601,nElems,snow shear strength (kPa) * 0602,nElems,grain size difference (mm) * 0603,nElems,hardness difference (1) * 0604,nElems,structural stability index SSI * 0605,nElems,inverse texture index ITI (Mg m-4) * 0606,nElems,critical cut length (m) + * 0621,nElems,dsm (for NIED only) + * 0622,nElems,Sigdsm (for NIED only) + * 0623,nElems,S_dsm (for NIED only) * * [DATA] * @endcode @@ -307,15 +317,15 @@ snowpack mass,Eroded mass,Rain rate,Surface runoff (without soil infiltration),S */ AsciiIO::AsciiIO(const SnowpackConfig& cfg, const RunInfo& run_info) - : setAppendableFiles(), variant(), experiment(), sw_mode(), + : setAppendableFiles(), metamorphism_model(), variant(), experiment(), sw_mode(), inpath(), snowfile(), i_snowpath(), outpath(), 
o_snowpath(), info(run_info), vecProfileFmt(), aggregate_prf(false), fixedPositions(), numberMeasTemperatures(0), maxNumberMeasTemperatures(0), numberTags(0), numberFixedSensors(0), totNumberSensors(0), time_zone(0.), calculation_step_length(0.), hazard_steps_between(0.), ts_days_between(0.), - min_depth_subsurf(0.), hoar_density_surf(0.), hoar_min_size_surf(0.), - avgsum_time_series(false), useCanopyModel(false), useSoilLayers(false), research_mode(false), perp_to_slope(false), + min_depth_subsurf(0.), hoar_density_surf(0.), hoar_min_size_surf(0.), useRichardsEq(false), enable_pref_flow(false), enable_ice_reservoir(false), enable_vapour_transport(false), + avgsum_time_series(false), useCanopyModel(false), useSoilLayers(false), research_mode(false), perp_to_slope(false), useReferenceLayer(false), out_heat(false), out_lw(false), out_sw(false), out_meteo(false), out_haz(false), out_mass(false), out_t(false), - out_load(false), out_stab(false), out_canopy(false), out_soileb(false), r_in_n(false) + out_load(false), out_stab(false), out_canopy(false), out_soileb(false), r_in_n(false), prof_ID_or_MK("ID"), prof_AGE_or_DATE("AGE") { //Defines how heights/depths of snow or/and soil temperatures are read in and output \n // Snowpack section @@ -325,15 +335,15 @@ AsciiIO::AsciiIO(const SnowpackConfig& cfg, const RunInfo& run_info) cfg.getValue("SW_MODE", "Snowpack", sw_mode); // Input section - cfg.getValue("METEOPATH", "Input", inpath); - const string in_snowpath = cfg.get("SNOWPATH", "Input"); + cfg.getValue("METEOPATH", "Input", inpath, IOUtils::nothrow); + const std::string in_snowpath = cfg.get("SNOWPATH", "Input", ""); cfg.getValue("TIME_ZONE", "Input", time_zone); // Output section - cfg.getValue("AVGSUM_TIME_SERIES", "Output", avgsum_time_series); + cfg.getValue("AVGSUM_TIME_SERIES", "Output", avgsum_time_series, IOUtils::nothrow); cfg.getValue("EXPERIMENT", "Output", experiment); cfg.getValue("HAZARD_STEPS_BETWEEN", "Output", hazard_steps_between); - 
cfg.getValue("METEOPATH", "Output", outpath); + cfg.getValue("METEOPATH", "Output", outpath, IOUtils::nothrow); cfg.getValue("OUT_CANOPY", "Output", out_canopy); cfg.getValue("OUT_HAZ", "Output", out_haz); cfg.getValue("OUT_HEAT", "Output", out_heat); @@ -345,19 +355,40 @@ AsciiIO::AsciiIO(const SnowpackConfig& cfg, const RunInfo& run_info) cfg.getValue("OUT_STAB", "Output", out_stab); cfg.getValue("OUT_SW", "Output", out_sw); cfg.getValue("OUT_T", "Output", out_t); - cfg.getValue("HARDNESS_IN_NEWTON", "Output", r_in_n); - const string out_snowpath = cfg.get("SNOWPATH", "Output"); + cfg.getValue("HARDNESS_IN_NEWTON", "Output", r_in_n, IOUtils::nothrow); + const std::string out_snowpath = cfg.get("SNOWPATH", "Output", ""); cfg.getValue("TS_DAYS_BETWEEN", "Output", ts_days_between); cfg.getValue("PROF_FORMAT", "Output", vecProfileFmt); + cfg.getValue("PROF_ID_OR_MK", "Output", prof_ID_or_MK); + if (prof_ID_or_MK != "ID" && prof_ID_or_MK != "MK") { + throw InvalidArgumentException("Unknown value for PROF_ID_OR_MK: "+prof_ID_or_MK+". Please specify if element 0504 in *.pro file should contain \"ID\" or \"MK\"", AT); + } + cfg.getValue("PROF_AGE_OR_DATE", "Output", prof_AGE_or_DATE); + if (prof_AGE_or_DATE != "AGE" && prof_AGE_or_DATE != "DATE") { + throw InvalidArgumentException("Unknown value for PROF_AGE_OR_DATE: "+prof_AGE_or_DATE+". 
Please specify if element 0505 in *.pro file should contain \"AGE\" or \"DATE\"", AT); + } cfg.getValue("AGGREGATE_PRF", "Output", aggregate_prf); + cfg.getValue("USEREFERENCELAYER", "Output", useReferenceLayer, IOUtils::nothrow); // SnowpackAdvanced section cfg.getValue("HOAR_DENSITY_SURF", "SnowpackAdvanced", hoar_density_surf); // Density of SH at surface node (kg m-3) cfg.getValue("HOAR_MIN_SIZE_SURF", "SnowpackAdvanced", hoar_min_size_surf); // Minimum size to show SH on surface (mm) + cfg.getValue("METAMORPHISM_MODEL", "SnowpackAdvanced", metamorphism_model, IOUtils::nothrow); cfg.getValue("MIN_DEPTH_SUBSURF", "SnowpackAdvanced", min_depth_subsurf); cfg.getValue("PERP_TO_SLOPE", "SnowpackAdvanced", perp_to_slope); cfg.getValue("RESEARCH", "SnowpackAdvanced", research_mode); cfg.getValue("VARIANT", "SnowpackAdvanced", variant); + cfg.getValue("PREF_FLOW", "SnowpackAdvanced", enable_pref_flow); + cfg.getValue("ICE_RESERVOIR", "SnowpackAdvanced", enable_ice_reservoir); + cfg.getValue("ENABLE_VAPOUR_TRANSPORT", "SnowpackAdvanced", enable_vapour_transport); //Enable vapour transport + + //Check for use of Richards Equation + useRichardsEq = false; + std::string tmp_useRichardsEq; + cfg.getValue("WATERTRANSPORTMODEL_SNOW", "SnowpackAdvanced", tmp_useRichardsEq); + if (tmp_useRichardsEq=="RICHARDSEQUATION") useRichardsEq = true; + cfg.getValue("WATERTRANSPORTMODEL_SOIL", "SnowpackAdvanced", tmp_useRichardsEq); + if (tmp_useRichardsEq=="RICHARDSEQUATION") useRichardsEq = true; i_snowpath = (in_snowpath.empty())? inpath : in_snowpath; o_snowpath = (out_snowpath.empty())? 
outpath : out_snowpath; @@ -366,6 +397,7 @@ AsciiIO::AsciiIO(const SnowpackConfig& cfg, const RunInfo& run_info) AsciiIO& AsciiIO::operator=(const AsciiIO& source) { if (this != &source) { setAppendableFiles = source.setAppendableFiles; + metamorphism_model = source.metamorphism_model; variant = source.variant; experiment = source.experiment; sw_mode = source.sw_mode; @@ -390,11 +422,15 @@ AsciiIO& AsciiIO::operator=(const AsciiIO& source) { min_depth_subsurf = source.min_depth_subsurf; hoar_density_surf = source.hoar_density_surf; hoar_min_size_surf = source.hoar_min_size_surf; + useRichardsEq = source.useRichardsEq; + enable_pref_flow = source.enable_pref_flow; + enable_ice_reservoir = source.enable_ice_reservoir; avgsum_time_series = source.avgsum_time_series; useCanopyModel = source.useCanopyModel; useSoilLayers = source.useSoilLayers; research_mode = source.research_mode; perp_to_slope = source.perp_to_slope; + useReferenceLayer = source.useReferenceLayer; out_heat = source.out_heat; out_lw = source.out_lw; out_sw = source.out_sw; @@ -407,6 +443,7 @@ AsciiIO& AsciiIO::operator=(const AsciiIO& source) { out_canopy = source.out_canopy; out_soileb = source.out_soileb; r_in_n = source.r_in_n; + enable_vapour_transport = source.enable_vapour_transport; } return *this; } @@ -435,9 +472,10 @@ bool AsciiIO::snowCoverExists(const std::string& i_snowfile, const std::string& * @param stationID * @param SSdata * @param Zdata + * @param read_salinity */ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& stationID, - SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata) + SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata, const bool&) { string snofilename = getFilenamePrefix(i_snowfile, i_snowpath, false); if (snofilename.rfind(".snoold") == string::npos) { @@ -500,7 +538,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st // Check consistency with radiation switch if ((sw_mode == "BOTH") && perp_to_slope && 
(SSdata.meta.getSlopeAngle() > Constants::min_slope_angle)) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "wrn", Date(), "You want to use measured albedo in a slope steeper than 3 deg with PERP_TO_SLOPE set!"); throw IOException("Do not generate Xdata from file "+snofilename, AT); @@ -508,43 +546,43 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st // Check consistency of nXLayerData if (fscanf(fin, "\nnSoilLayerData=%d", &dum) != 1) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Missing 'nSoilLayerData'"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } if (dum < 0) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "'nSoilLayerData' < 0 !!!"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } else if (useSoilLayers && (dum < 1)) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "useSoilLayers set but 'nSoilLayerData' < 1 !!!"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } else if (!useSoilLayers && (dum > 0)) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "useSoilLayers not set but 'nSoilLayerData' > 0 !!!"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } SSdata.nLayers = static_cast(dum); //we checked that it is >0 if (fscanf(fin, "\nnSnowLayerData=%d", &dum) != 1) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Missing 'nSnowLayerData'"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } if (dum < 0) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "'nSnowLayerData' < 0 !!!"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } SSdata.nLayers += static_cast(dum); //we checked that it is >0 if (fscanf(fin, "\nSoilAlbedo=%lf", &SSdata.SoilAlb) != 1) { 
- fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Missing 'SoilAlbedo'"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } if (fscanf(fin, "\nBareSoil_z0=%lf", &SSdata.BareSoil_z0) != 1) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Missing 'BareSoil_z0'"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } @@ -585,7 +623,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st } if (fscanf(fin,"\nYYYY") < 0) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Failed reading layer header starting with 'YYYY'"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } @@ -600,13 +638,13 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st SSdata.Ldata.resize(SSdata.nLayers, LayerData()); for (size_t ll = 0; ll < SSdata.nLayers; ll++) { if ((nFields = fscanf(fin, " %d %d %d %d %d", &YYYY, &MM, &DD, &HH, &MI)) != 5) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Failed reading date: read %d fields", nFields); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } SSdata.Ldata[ll].depositionDate = Date::rnd(Date(YYYY, MM, DD, HH, MI, time_zone), 1.); if (SSdata.Ldata[ll].depositionDate > SSdata.profileDate) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Layer %u from bottom is younger (%s) than ProfileDate (%s) !!!", ll+1, SSdata.Ldata[ll].depositionDate.toString(Date::ISO).c_str(), SSdata.profileDate.toString(Date::ISO).c_str()); @@ -623,16 +661,17 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st if ((nFields = fscanf(fin, " %lf %lf %lf %lf %lf %lf", &SSdata.Ldata[ll].hl, &SSdata.Ldata[ll].tl, &SSdata.Ldata[ll].phiIce, &SSdata.Ldata[ll].phiWater, &SSdata.Ldata[ll].phiVoids, &SSdata.Ldata[ll].phiSoil)) != 6) { - 
fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Failed reading hl etc: read %d of 6 fields", nFields); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } + SSdata.Ldata[ll].phiWaterPref = 0.; if (SSdata.Ldata[ll].tl < 100.) { SSdata.Ldata[ll].tl = IOUtils::C_TO_K(SSdata.Ldata[ll].tl); } if ((nFields = fscanf(fin, "%lf %lf %lf", &SSdata.Ldata[ll].SoilRho, &SSdata.Ldata[ll].SoilK, &SSdata.Ldata[ll].SoilC)) != 3) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Failed reading SoilRho etc: read %d of 3 fields", nFields); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } @@ -646,7 +685,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st } SSdata.Ldata[ll].ne = static_cast(ne_tmp); if (SSdata.Ldata[ll].phiSoil==0. && (SSdata.Ldata[ll].rg<=0. || SSdata.Ldata[ll].rb<=0.)) { //Test only for snow layers - fclose(fin); + fclose(fin); std::stringstream ss; ss << "Invalid grain specification in layer " << ll+1 << " (from bottom) of file " << snofilename << ": "; ss << "grain radius = " << SSdata.Ldata[ll].rg << " bond radius = " << SSdata.Ldata[ll].rb; @@ -660,7 +699,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st ll+1, Metamorphism::max_grain_bond_ratio, SSdata.Ldata[ll].rb, SSdata.Ldata[ll].rg); } if ((nFields = fscanf(fin, "%lf %lf", &SSdata.Ldata[ll].CDot, &SSdata.Ldata[ll].metamo)) != 2) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Failed reading CDot etc: read %d of 2 fields", nFields); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } @@ -668,7 +707,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st if ((nFields = fscanf(fin," %lf %lf %lf %lf ", &SSdata.Ldata[ll].cIce[ii], &SSdata.Ldata[ll].cWater[ii], &SSdata.Ldata[ll].cVoids[ii], &SSdata.Ldata[ll].cSoil[ii])) != 4) { - fclose(fin); + 
fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "Failed reading impurity concentrations: read %d of 4 fields", nFields); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); @@ -683,7 +722,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st } for (size_t ii = 0; ii < 48; ii++) { if (fscanf(fin," %lf ", &Zdata.hoar24[ii]) != 1) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "While reading hoar data (48) !!!"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } @@ -694,7 +733,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st } for (size_t ii = 0; ii < 48; ii++) { if (fscanf(fin," %lf ", &Zdata.drift24[ii]) != 1) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "While reading drift data (48) !!!"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } @@ -705,7 +744,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st } for (size_t ii = 0; ii < 144; ii++) { if (fscanf(fin," %lf ", &Zdata.hn3[ii]) != 1) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "While reading hn(3h) data (144) !!!"); throw InvalidFormatException("While reading Zdata (hns3) !!!", AT); } @@ -716,7 +755,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st } for (size_t ii = 0; ii < 144; ii++) { if (fscanf(fin," %lf ", &Zdata.hn24[ii]) != 1) { - fclose(fin); + fclose(fin); prn_msg(__FILE__, __LINE__, "err", Date(), "While reading hn(24h) data (144) !!!"); throw InvalidFormatException("Cannot generate Xdata from file "+snofilename, AT); } @@ -725,7 +764,7 @@ void AsciiIO::readSnowCover(const std::string& i_snowfile, const std::string& st SSdata.nN = 1; SSdata.Height = 0.; for (size_t ll = 0; ll < SSdata.nLayers; ll++) { - SSdata.nN += SSdata.Ldata[ll].ne; + SSdata.nN += SSdata.Ldata[ll].ne; 
SSdata.Height += SSdata.Ldata[ll].hl; } @@ -747,7 +786,7 @@ void AsciiIO::writeSnowCover(const mio::Date& date, const SnowStation& Xdata, string snofilename = getFilenamePrefix(Xdata.meta.getStationID().c_str(), o_snowpath) + ".snoold"; if (forbackup){ stringstream ss; -// ss << (int)(date.getJulian() + 0.5); +// ss << (date.toString(Date::NUM)); snofilename += ss.str(); } @@ -774,7 +813,7 @@ void AsciiIO::writeSnowCover(const mio::Date& date, const SnowStation& Xdata, // Latitude, Longitude, Altitude, Slope Angle, Slope Azimut fout << "Latitude= " << fixed << std::setw(11) << std::setprecision(8) << Xdata.meta.position.getLat() << "\n"; fout << "Longitude= "<< fixed << std::setw(11) << std::setprecision(8) << Xdata.meta.position.getLon() << "\n"; - fout << "Altitude= " << fixed << setprecision(0) << Xdata.meta.position.getAltitude() << "\n"; + fout << "Altitude= " << fixed << setprecision(0) << Xdata.meta.position.getAltitude() << "\n"; fout << "SlopeAngle= " << fixed << setprecision(2) << Xdata.meta.getSlopeAngle() << "\n"; fout << "SlopeAzi= " << fixed << setprecision(2) << Xdata.meta.getAzimuth() << "\n"; @@ -916,8 +955,13 @@ void AsciiIO::writeProfilePro(const mio::Date& i_date, const SnowStation& Xdata, const double cos_sl = Xdata.cos_sl; const bool no_snow = (nE == Xdata.SoilNode); + // Are we using sea ice variant? Check if the object is defined via the pointer: + const bool SeaIce = (Xdata.Seaice==NULL)?(false):(true); + // Check reference level: either a marked reference level, or, if non existent, the sea level (if sea ice module is used), otherwise 0: + const double ReferenceLevel = ( Xdata.findMarkedReferenceLayer()==Constants::undefined || !useReferenceLayer ) ? ( (Xdata.Seaice==NULL)?(0.):(Xdata.Seaice->SeaLevel) ) : (Xdata.findMarkedReferenceLayer() - Xdata.Ground); + // 501: height [> 0: top, < 0: bottom of elem.] (cm) - const size_t nz = (useSoilLayers)? nN : nE; + const size_t nz = (useSoilLayers || SeaIce)? 
nN : nE; if(nE==0) { fout << "\n0501,1,0"; fout.close(); @@ -925,8 +969,14 @@ void AsciiIO::writeProfilePro(const mio::Date& i_date, const SnowStation& Xdata, } else { fout << "\n0501," << nz; } - for (size_t n = nN-nz; n < nN; n++) - fout << "," << std::fixed << std::setprecision(2) << M_TO_CM((NDS[n].z+NDS[n].u - NDS[Xdata.SoilNode].z)/cos_sl); + for (size_t n = nN-nz; n < nN; n++) { + if (SeaIce) { + //Correct for sea level: + fout << "," << std::fixed << std::setprecision(2) << M_TO_CM((NDS[n].z+NDS[n].u - NDS[Xdata.SoilNode].z - ReferenceLevel)/cos_sl); + } else { + fout << "," << std::fixed << std::setprecision(2) << M_TO_CM((NDS[n].z+NDS[n].u - NDS[Xdata.SoilNode].z)/cos_sl); + } + } // 0502: element density (kg m-3) fout << "\n0502," << nE; for (size_t e = 0; e < nE; e++) @@ -935,10 +985,29 @@ void AsciiIO::writeProfilePro(const mio::Date& i_date, const SnowStation& Xdata, fout << "\n0503," << nE; for (size_t e = 0; e < nE; e++) fout << "," << std::fixed << std::setprecision(2) << IOUtils::K_TO_C(EMS[e].Te); + // 0504: element ID + fout << "\n0504," << nE; + for (size_t e = 0; e < nE; e++) + fout << "," << std::fixed << std::setprecision(0) << ((prof_ID_or_MK == "ID") ? 
(EMS[e].ID) : (EMS[e].mk)); + // 0505: element age + fout << "\n0505," << nE; + for (size_t e = 0; e < nE; e++) { + if (prof_AGE_or_DATE == "AGE") { + fout << "," << std::fixed << std::setprecision(2) << (i_date.getJulian() - EMS[e].depositionDate.getJulian()); + } else { + fout << "," << EMS[e].depositionDate.toString(Date::ISO); + } + } // 0506: liquid water content by volume (%) fout << "\n0506," << nE; for (size_t e = 0; e < nE; e++) fout << "," << std::fixed << std::setprecision(1) << 100.*EMS[e].theta[WATER]; + // 0507: liquid preferential flow water content by volume (%) + if(enable_pref_flow) { + fout << "\n0507," << nE; + for (size_t e = 0; e < nE; e++) + fout << "," << std::fixed << std::setprecision(3) << 100.*EMS[e].theta[WATER_PREF]; + } // 0508: snow dendricity (1) if (no_snow) { fout << "\n0508,1,0"; @@ -1038,11 +1107,23 @@ void AsciiIO::writeProfilePro(const mio::Date& i_date, const SnowStation& Xdata, for (size_t e = Xdata.SoilNode; e < nE; e++) fout << "," << std::fixed << std::setprecision(1) << 1.e6*EMS[e].Eps_vDot; } + // 0524: ice reservoir content by volume (%) + if(enable_ice_reservoir) { + fout << "\n0524," << nE; + for (size_t e = 0; e < nE; e++) + fout << "," << std::fixed << std::setprecision(3) << 100.*EMS[e].theta_i_reservoir; + } + // 0525: cumulated ice reservoir content by volume (%) + if(enable_ice_reservoir) { + fout << "\n0525," << nE; + for (size_t e = 0; e < nE; e++) + fout << "," << std::fixed << std::setprecision(3) << 100.*EMS[e].theta_i_reservoir_cumul; + } // 0530: position (cm) and minimum stability indices fout << "\n0530,8"; - fout << "," << std::fixed << +Xdata.S_class1 << "," << +Xdata.S_class2; //force printing type char as numerica value - fout << "," << std::setprecision(1) << M_TO_CM(Xdata.z_S_d/cos_sl) << "," << std::setprecision(2) << Xdata.S_d; - fout << "," << std::fixed << std::setprecision(1) << M_TO_CM(Xdata.z_S_n/cos_sl) << "," << std::setprecision(2) << Xdata.S_n; + fout << "," << std::fixed << 
+Xdata.S_class1 << "," << +Xdata.S_class2; //force printing type char as numerical value + fout << "," << std::setprecision(1) << M_TO_CM(Xdata.z_S_d/cos_sl) << "," << std::setprecision(2) << Xdata.S_d; + fout << "," << std::fixed << std::setprecision(1) << M_TO_CM(Xdata.z_S_n/cos_sl) << "," << std::setprecision(2) << Xdata.S_n; fout << "," << std::setprecision(1) << M_TO_CM(Xdata.z_S_s/cos_sl) << "," << std::fixed << std::setprecision(2) << Xdata.S_s; // 0531: deformation rate stability index Sdef if (no_snow) { @@ -1057,7 +1138,7 @@ void AsciiIO::writeProfilePro(const mio::Date& i_date, const SnowStation& Xdata, fout << "\n0532,1,0"; } else { fout << "\n0532," << nE-Xdata.SoilNode; - for (size_t e = Xdata.SoilNode; e < nE; e++) + for (size_t e = Xdata.SoilNode; e < nE; e++) fout << "," << std::fixed << std::setprecision(2) << NDS[e+1].S_n; } // 0533: stability index Sk38 @@ -1089,11 +1170,60 @@ void AsciiIO::writeProfilePro(const mio::Date& i_date, const SnowStation& Xdata, for (size_t e = Xdata.SoilNode; e < nE; e++) fout << "," << std::fixed << std::setprecision(2) << EMS[e].ogs; } + if (Xdata.Seaice!=NULL) { + // 0540: bulk salinity (g/kg) + if (no_snow) { + fout << "\n0540,1,0"; + } else { + fout << "\n0540," << nE-Xdata.SoilNode; + for (size_t e = Xdata.SoilNode; e < nE; e++) + fout << "," << std::fixed << std::setprecision(2) << EMS[e].salinity; + } + // 0541: bulk salinity (g/kg) + if (no_snow) { + fout << "\n0541,1,0"; + } else { + fout << "\n0541," << nE-Xdata.SoilNode; + for (size_t e = Xdata.SoilNode; e < nE; e++) + fout << "," << std::fixed << std::setprecision(2) << ((EMS[e].theta[WATER] == 0.) ? (mio::IOUtils::nodata) : (EMS[e].salinity / EMS[e].theta[WATER])); + } + } + // 0560: potential lateral flow rate (kg/m3), only when using Richards equation and the SnowStation is on a slope + if (useRichardsEq && Xdata.meta.getSlopeAngle() > 0.) 
{ + fout << "\n0560," << nE; + for (size_t e = 0; e < nE; e++) + fout << "," << std::fixed << std::setprecision(12) << EMS[e].SlopeParFlux * Constants::density_water; + } if (variant == "CALIBRATION") writeProfileProAddCalibration(Xdata, fout); else writeProfileProAddDefault(Xdata, fout); + if(enable_vapour_transport) { + // 0901: the degree of undersaturation, (rhov-rohv_sat)/rhov_sat (-) + fout << "\n0901," << nE; + for (size_t e = 0; e < nE; e++) + fout << "," << std::scientific << std::setprecision(6) << 1.e2*EMS[e].vapTrans_underSaturationDegree; + + // 0902: the water vapor diffusion flux (kg m-2 s-1) + fout << "\n0902," << nE; + for (size_t e = 0; e < nE; e++) + fout << "," << std::scientific << std::setprecision(6) << 1.e7*EMS[e].vapTrans_fluxDiff; + + // 0903: the cumulative density change due to water vapor transport (kg m-3) + fout << "\n0903," << nE; + for (size_t e = 0; e < nE; e++) + fout << "," << std::scientific << std::setprecision(6) << EMS[e].vapTrans_cumulativeDenChange; + + // 0904: the snow density change rate due to water vapor transport (1.0e-6 kg m-3) + fout << "\n0904," << nE; + for (size_t e = 0; e < nE; e++) + fout << "," << std::scientific << std::setprecision(6) << 1.e6*EMS[e].vapTrans_snowDenChangeRate; + + // 0905: element tracking for making comparison of any snow properties between two simulation, (-) + fout << "\n0905," << nE; + } + fout.close(); } @@ -1153,10 +1283,33 @@ void AsciiIO::writeProfileProAddDefault(const SnowStation& Xdata, std::ofstream for (size_t e = Xdata.SoilNode; e < nE; e++) { fout << "," << std::fixed << std::setprecision(2) << EMS[e].crit_cut_length; } + if (metamorphism_model == "NIED") { + // 0621: Dry snow metamorphism factor + fout << "\n0621," << nE-Xdata.SoilNode; + for (size_t e = Xdata.SoilNode; e < nE; e++) { + fout << "," << std::fixed << std::setprecision(2) << EMS[e].dsm; + } + // 0622: Sigdsm + fout << "\n0622," << nE-Xdata.SoilNode; + for (size_t e = Xdata.SoilNode; e < nE; e++) { + fout << "," 
<< std::fixed << std::setprecision(2) << NDS[e+1].Sigdsm; + } + // 0623: S_dsm + fout << "\n0623," << nE-Xdata.SoilNode; + for (size_t e = Xdata.SoilNode; e < nE; e++) { + fout << "," << std::fixed << std::setprecision(2) << NDS[e+1].S_dsm; + } + } } else { for (size_t jj = 1; jj < 7; jj++) { fout << "\n060" << jj << ",1,0"; } + if (metamorphism_model == "NIED") { + for (size_t jj = 1; jj < 4; jj++) { + fout << "\n062" << jj << ",1,0"; + } + } + } } @@ -1222,11 +1375,11 @@ void AsciiIO::writeProfileProAddCalibration(const SnowStation& Xdata, std::ofstr // 0704: SNOWPACK: total settling rate (% h-1) fout << "\n0704," << nE-Xdata.SoilNode; for (size_t e=Xdata.SoilNode; e Pdata( SnowProfileLayer::generateProfile(dateOfProfile, Xdata, hoar_density_surf, hoar_min_size_surf) ); if (aggregate) { Aggregate::aggregate(Pdata); @@ -1307,17 +1460,17 @@ void AsciiIO::writeProfilePrf(const mio::Date& dateOfProfile, const SnowStation& const size_t nL = Pdata.size(); ofs << nL << "," << setprecision(1) << Pdata[nL-1].height << "," << Xdata.swe << "," << Xdata.lwc_sum << ","; ofs << Pdata[nL-1].T << "," << IOUtils::K_TO_C(Xdata.Ndata[Xdata.SoilNode].T) << "\n"; - + //Minima of stability indices at their respective depths as well as stability classifications ofs << "#Stab,stab_height,stab_index,stab_class1,stab_class2\n"; ofs << "# ,cm,1,1,1\n"; ofs << "deformation," << setprecision(1) << M_TO_CM(Xdata.z_S_d/cos_sl) << "," << setprecision(2) << Xdata.S_d << ","; - ofs << +Xdata.S_class1 << "," << +Xdata.S_class2 << "\n"; //force printing type char as numerica value + ofs << +Xdata.S_class1 << "," << +Xdata.S_class2 << "\n"; //force printing type char as numerical value ofs << "natural," << setprecision(1) << M_TO_CM(Xdata.z_S_n/cos_sl) << "," << setprecision(2) << Xdata.S_n << "\n"; ofs << "ssi," << setprecision(1) << M_TO_CM(Xdata.z_S_s/cos_sl) << "," << setprecision(2) << Xdata.S_s << "\n"; ofs << "S4," << setprecision(1) << M_TO_CM(Xdata.z_S_4/cos_sl) << "," << setprecision(2) << 
Xdata.S_4 << "\n"; ofs << "S5," << setprecision(1) << M_TO_CM(Xdata.z_S_5/cos_sl) << "," << setprecision(2) << Xdata.S_5 << "\n"; - + //Now write all layers starting from the ground if (aggregate) ofs << "#Aggregated profile\n"; @@ -1334,7 +1487,7 @@ void AsciiIO::writeProfilePrf(const mio::Date& dateOfProfile, const SnowStation& ofs << Pdata[ll].type << "," << Pdata[ll].marker << "," << setprecision(1) << Pdata[ll].hard << "\n"; } ofs << "\n\n"; - + ofs.close(); } @@ -1472,7 +1625,7 @@ size_t AsciiIO::writeHeightTemperatureTag(std::ofstream &fout, const size_t& tag if (perp_pos == Constants::undefined) { fout << ",," << std::fixed << std::setprecision(2) << Constants::undefined; } else { - fout << "," << std::fixed << std::setprecision(2) << M_TO_CM(perp_pos)/cos_sl; + fout << "," << std::fixed << std::setprecision(2) << M_TO_CM(perp_pos)/cos_sl; const double temp = checkMeasuredTemperature(Mdata.ts.at(ii), perp_pos, Xdata.mH); fout << "," << std::fixed << std::setprecision(2) << temp; } @@ -1618,10 +1771,7 @@ bool AsciiIO::parsePrfFile(const char& eoln, const mio::Date& start_date, std::i IOUtils::readLineToVec(tmpline, vecTmp, ','); IOUtils::readLineToVec(tmpline, vecTmp, ','); if (vecTmp[1].length() >= 16) { - const string tmpdate = vecTmp[1].substr(0,15 - - - ); + const string tmpdate = vecTmp[1].substr(0,15); IOUtils::convertString(current_date, tmpdate, time_zone); if (current_date.getJulian() < (start_date.getJulian()-0.00001)){ @@ -1757,6 +1907,12 @@ void AsciiIO::writeTimeSeries(const SnowStation& Xdata, const SurfaceFluxes& Sda // Check for availability of measured snow/soil temperatures setNumberSensors(Mdata); + // Correction for snow depth. If we have a marked reference layer, then subtract the height of the reference layer in the output. + const double HScorrC = (Xdata.findMarkedReferenceLayer()==IOUtils::nodata || !useReferenceLayer) ? (0.) 
: (Xdata.findMarkedReferenceLayer() - Xdata.Ground); + + // Check for number of computation steps in between output steps + const size_t nCalcSteps = (!avgsum_time_series) ? (static_cast(ts_days_between / M_TO_D(calculation_step_length) + 0.5)) : (1); + // Check file for header if (!checkHeader(Xdata, filename, "met", "[STATION_PARAMETERS]")) { prn_msg(__FILE__, __LINE__, "err", Mdata.date, "Checking header in file %s", filename.c_str()); @@ -1774,12 +1930,12 @@ void AsciiIO::writeTimeSeries(const SnowStation& Xdata, const SurfaceFluxes& Sda fout << std::fixed << std::setprecision(6); if (out_heat) // 1-2: Turbulent fluxes (W m-2) - fout << "," << Sdata.qs << "," << Sdata.ql; + fout << "," << Sdata.qs << "," << Sdata.ql; else fout << ",,"; if (out_lw) // 3-5: Longwave radiation fluxes (W m-2) - fout << "," << Sdata.lw_out << "," << Sdata.lw_in << "," << Sdata.lw_net; + fout << "," << Sdata.lw_out << "," << Sdata.lw_in << "," << Sdata.lw_net; else fout << ",,,"; if (out_sw) @@ -1807,8 +1963,8 @@ void AsciiIO::writeTimeSeries(const SnowStation& Xdata, const SurfaceFluxes& Sda // 23-26: rH (%), wind (m s-1), wind_drift (m s-1), wind_dir (deg), // 27: solid precipitation rate (kg m-2 h-1), // 28-29: modeled and enforced vertical snow depth (cm); see also 51 - fout << "," << 100.*Mdata.rh << "," << Mdata.vw << "," << Mdata.vw_drift << "," << Mdata.dw << "," << Sdata.mass[SurfaceFluxes::MS_HNW]; - fout << "," << std::fixed << std::setprecision(2) << M_TO_CM((Xdata.cH - Xdata.Ground)/cos_sl) << ","; + fout << "," << 100.*Mdata.rh << "," << Mdata.vw << "," << Mdata.vw_drift << "," << Mdata.dw << "," << Sdata.mass[SurfaceFluxes::MS_HNW]; + fout << "," << std::fixed << std::setprecision(2) << M_TO_CM((Xdata.cH - Xdata.Ground - HScorrC)/cos_sl) << ","; if (Xdata.mH!=Constants::undefined) fout << M_TO_CM((Xdata.mH - Xdata.Ground)/cos_sl) << std::setprecision(6); else @@ -1825,21 +1981,21 @@ void AsciiIO::writeTimeSeries(const SnowStation& Xdata, const SurfaceFluxes& Sda } else 
{ if(out_soileb) { // 30-33: soil energy balance variables - size_t nCalcSteps = 1; - nCalcSteps = static_cast(ts_days_between / M_TO_D(calculation_step_length) + 0.5); fout << "," << (Sdata.dIntEnergySoil * static_cast(nCalcSteps)) / 1000. << "," << (Sdata.meltFreezeEnergySoil * static_cast(nCalcSteps)) / 1000. << "," << Xdata.ColdContentSoil/1E6 << "," << Hdata.hn72_24; } else { fout << ",,,,"; } } if (out_mass) { - // 34-39: SWE, eroded mass, rain rate, runoff at bottom of snowpack, sublimation and evaporation, all in kg m-2 except rain as rate: kg m-2 h-1; see also 52 & 93 - fout << "," << Sdata.mass[SurfaceFluxes::MS_SWE]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_WIND]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_RAIN]; - fout << "," << Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_SUBLIMATION]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_EVAPORATION]/cos_sl; + // 34-40: SWE (kg m-2), eroded mass (kg m-2 h-1), rain rate (kg m-2 h-1), runoff at bottom of snowpack (kg m-2), + // runoff at the soil surface (kg m-2), sublimation and evaporation (both in kg m-2); see also 53 & 94. + // Note: in operational mode, runoff at bottom of snowpack is expressed as kg m-2 h-1 when !cumsum_mass. 
+ fout << "," << Sdata.mass[SurfaceFluxes::MS_SWE]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_WIND]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_RAIN]; + fout << "," << Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_SUBLIMATION]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_EVAPORATION]/cos_sl; } else { fout << ",,,,,,"; } - // 40-49: Internal Temperature Time Series at fixed heights, modeled and measured, all in degC + // 41-50: Internal Temperature Time Series at fixed heights, modeled and measured, all in degC if (out_t && (fixedPositions.size() || Mdata.getNumberFixedRates())) { const size_t nrFixedPositions = std::min((size_t)5, fixedPositions.size()); if (Mdata.zv_ts.size()!=nrFixedPositions || Mdata.ts.size()!=nrFixedPositions) { @@ -1858,24 +2014,24 @@ void AsciiIO::writeTimeSeries(const SnowStation& Xdata, const SurfaceFluxes& Sda fout << ",,,,,,,,,,"; } if (maxNumberMeasTemperatures == 5) { - // 50: Solute load at ground surface - if (out_load) + // 51: Solute load at ground surface + if (out_load && !Sdata.load.empty()) fout << "," << Sdata.load[0]; else fout << ","; - // 51: input snow depth HS (cm); see also 28-29 + // 52: input snow depth HS (cm); see also 28-29 if (out_meteo) fout << "," << std::fixed << std::setprecision(2) << M_TO_CM(Mdata.hs)/cos_sl << std::setprecision(6); else fout << ","; - // 52: LWC (kg m-2); see also 34-39 + // 52-54: LWC (kg m-2); see also 34-39 if (out_mass) - fout << "," << Sdata.mass[SurfaceFluxes::MS_WATER]/cos_sl; + fout << "," << Sdata.mass[SurfaceFluxes::MS_WATER]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_WATER_SOIL]/cos_sl << "," << Sdata.mass[SurfaceFluxes::MS_ICE_SOIL]/cos_sl; else fout << ","; - // 53-64: Stability Time Series, heights in cm + // 55-66: Stability Time Series, heights in cm if (out_stab) { - fout << "," << +Xdata.S_class1 << "," << +Xdata.S_class2 << std::fixed; //profile type and 
stability class, force printing type char as numerica value + fout << "," << +Xdata.S_class1 << "," << +Xdata.S_class2 << std::fixed; //profile type and stability class, force printing type char as numerical value fout << "," << std::setprecision(1) << M_TO_CM(Xdata.z_S_d/cos_sl) << "," << std::setprecision(2) << Xdata.S_d; fout << "," << std::setprecision(1) << M_TO_CM(Xdata.z_S_n/cos_sl) << "," << std::setprecision(2) << Xdata.S_n; fout << "," << std::setprecision(1) << M_TO_CM(Xdata.z_S_s/cos_sl) << "," << std::setprecision(2) << Xdata.S_s; @@ -1885,13 +2041,34 @@ void AsciiIO::writeTimeSeries(const SnowStation& Xdata, const SurfaceFluxes& Sda } else { fout << ",,,,,,,,,,,,"; } - // 65-92 (28 columns) + // 67-94 (28 columns) if (out_canopy && useCanopyModel) Canopy::DumpCanopyData(fout, &Xdata.Cdata, &Sdata, cos_sl); - else - fout << ",,,,,,,,,,,,,,,,,,,,,,,,,,,,"; + else { + if (variant == "SEAICE" && Xdata.Seaice != NULL) { + // Total thickness (m), Ice thickness (m), snow thickness (m), snow thickness wrt reference (m), freeboard (m), sea level (m), bulk salinity, average bulk salinity, average brine salinity, bottom salinity flux, top salinity flux, MS_FLOODING, MS_ICEBASE_MELTING_FREEZING + fout << "," << std::setprecision(3) << Xdata.cH - Xdata.Ground; + fout << "," << std::setprecision(3) << Xdata.Ndata[Xdata.Seaice->IceSurfaceNode].z - Xdata.Ground; + fout << "," << std::setprecision(3) << Xdata.Ndata[Xdata.getNumberOfNodes()-1].z - Xdata.Ndata[Xdata.Seaice->IceSurfaceNode].z; + // Check reference level: either a marked reference level, or, if non existent, the sea level (if sea ice module is used), otherwise 0: + const double ReferenceLevel = ( Xdata.findMarkedReferenceLayer()==IOUtils::nodata || !useReferenceLayer ) ? 
( (Xdata.Seaice==NULL)?(0.):(Xdata.Seaice->SeaLevel) ) : (Xdata.findMarkedReferenceLayer() - Xdata.Ground); + fout << "," << std::setprecision(3) << Xdata.Ndata[Xdata.getNumberOfNodes()-1].z - ReferenceLevel; + fout << "," << std::setprecision(3) << Xdata.Seaice->FreeBoard; + fout << "," << std::setprecision(3) << Xdata.Seaice->SeaLevel; + fout << "," << std::setprecision(3) << Xdata.Seaice->getTotSalinity(Xdata); + fout << "," << std::setprecision(3) << Xdata.Seaice->getAvgBulkSalinity(Xdata); + fout << "," << std::setprecision(3) << Xdata.Seaice->getAvgBrineSalinity(Xdata); + fout << "," << std::setprecision(3) << Xdata.Seaice->BottomSalFlux; + fout << "," << std::setprecision(3) << Xdata.Seaice->TopSalFlux; + fout << "," << Sdata.mass[SurfaceFluxes::MS_FLOODING]/cos_sl; + fout << "," << Sdata.mass[SurfaceFluxes::MS_ICEBASE_MELTING_FREEZING]/cos_sl; + fout << ",,,,,,,,,,,,,,,"; + } else { + fout << ",,,,,,,,,,,,,,,,,,,,,,,,,,,,"; + } + } } else if (out_t) { - // 50-93 (44 columns) + // 52-95 (44 columns) size_t ii, jj = 0; for (ii = std::min((size_t)5, fixedPositions.size()); ii < numberFixedSensors; ii++) { if ((jj += writeTemperatures(fout, Mdata.zv_ts.at(ii), Mdata.ts.at(ii), ii, Xdata)) > 44) { @@ -1900,31 +2077,13 @@ void AsciiIO::writeTimeSeries(const SnowStation& Xdata, const SurfaceFluxes& Sda throw IOException("Writing Time Series data failed", AT); } } - if (Xdata.tag_low) { - size_t tag = Xdata.tag_low, j_lim; - while ( (tag + ii) <= numberFixedSensors ) { - if ((tag + ii) <= numberMeasTemperatures) - j_lim = 41; - else - j_lim = 43; - if (jj < j_lim) { - jj += writeHeightTemperatureTag(fout, tag, Mdata, Xdata); - tag++; - } else { - break; - } - } - } - for (; jj < 44; jj++) + for (; jj < 45; jj++) fout << ","; } else { fout << ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"; } - // 93[94]-100 (8 or 7 free columns) - size_t nCalcSteps = 1; + // 96[94]-102 (8 or 7 free columns) double crust = 0., dhs_corr = 0., mass_corr = 0.; - if 
(!avgsum_time_series) - nCalcSteps = static_cast(ts_days_between / M_TO_D(calculation_step_length) + 0.5); if (out_haz) { crust = Hdata.crust; dhs_corr = Hdata.dhs_corr; @@ -1967,7 +2126,7 @@ void AsciiIO::writeTimeSeriesAddDefault(const SnowStation& Xdata, const SurfaceF if (out_heat) { // 94: change of internal energy (kJ m-2) if (Xdata.getNumberOfElements() > Xdata.SoilNode) - fout << "," << std::fixed << std::setprecision(3) << ((Sdata.dIntEnergy * static_cast(nCalcSteps)) + fout << "," << std::fixed << std::setprecision(3) << ((Sdata.dIntEnergy * static_cast(nCalcSteps)) - (Sdata.qg0 * D_TO_S(ts_days_between))) / 1000. << std::setprecision(6); else fout << "," << Constants::undefined; @@ -1979,7 +2138,7 @@ void AsciiIO::writeTimeSeriesAddDefault(const SnowStation& Xdata, const SurfaceF } // 96-97: new snow densities, measured and in use (kg m-3) if(Sdata.cRho_hn > 0.) { - fout << "," << std::fixed << std::setprecision(1) << Sdata.mRho_hn << "," << Sdata.cRho_hn << std::setprecision(6); + fout << "," << std::fixed << std::setprecision(1) << Sdata.mRho_hn << "," << Sdata.cRho_hn << std::setprecision(6); } else { if(Mdata.rho_hn != mio::IOUtils::nodata) fout << "," << std::fixed << std::setprecision(1) << -Mdata.rho_hn << "," << Sdata.cRho_hn << std::setprecision(6); @@ -2076,7 +2235,7 @@ void AsciiIO::writeTimeSeriesAddCalibration(const SnowStation& Xdata, const Surf fout << "," << std::fixed << std::setprecision(3) << ((Sdata.dIntEnergy * static_cast(nCalcSteps)) - (Sdata.qg0 * D_TO_S(ts_days_between))) / 1000. 
<< std::setprecision(6); else - fout << "," << Constants::undefined; + fout << "," << Constants::undefined; // 95: sum of energy fluxes at surface (kJ m-2) fout << "," << ((Sdata.qw + Sdata.lw_net + Sdata.qs + Sdata.ql + Sdata.qr) * D_TO_S(ts_days_between)) / 1000.; @@ -2106,9 +2265,9 @@ void AsciiIO::writeMETHeader(const SnowStation& Xdata, std::ofstream &fout) cons const string stationname = Xdata.meta.getStationName(); fout << "[STATION_PARAMETERS]"; fout << "\nStationName= " << stationname; - fout << "\nLatitude= " << std::fixed << std::setprecision(8) << Xdata.meta.position.getLat(); - fout << "\nLongitude= " << std::fixed << std::setprecision(8) << Xdata.meta.position.getLon(); - fout << "\nAltitude= " << std::fixed << std::setprecision(0) << Xdata.meta.position.getAltitude(); + fout << "\nLatitude= " << std::fixed << std::setprecision(8) << Xdata.meta.position.getLat(); + fout << "\nLongitude= " << std::fixed << std::setprecision(8) << Xdata.meta.position.getLon(); + fout << "\nAltitude= " << std::fixed << std::setprecision(0) << Xdata.meta.position.getAltitude(); fout << "\nSlopeAngle= " << std::fixed << std::setprecision(2) << Xdata.meta.getSlopeAngle(); fout << "\nSlopeAzi= " << std::fixed << std::setprecision(2) << Xdata.meta.getAzimuth(); fout << "\nDepthTemp= " << std::fixed << std::setprecision(1) << (Xdata.SoilNode > 0); @@ -2119,25 +2278,30 @@ void AsciiIO::writeMETHeader(const SnowStation& Xdata, std::ofstream &fout) cons if (out_haz) { // HACK To avoid troubles in A3D fout << "\n#" << info.computation_date.toString(Date::ISO) << ", Snowpack " << variant << " version " << info.version << " run by \"" << info.user << "\""; if (research_mode) - fout << " (research mode)"; + fout << " (research mode)"; else - fout << " (operational mode)"; + fout << " (operational mode)"; } - fout << 
"\n,,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100"; + fout << "\n,,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101"; fout << "\nID,Date,Sensible heat,Latent heat,Outgoing longwave radiation,Incoming longwave radiation,Net absorbed longwave radiation,Reflected shortwave radiation,Incoming shortwave radiation,Net absorbed shortwave radiation,Modelled surface albedo,Air temperature,Modeled surface temperature,Measured surface temperature,Temperature at bottom of snow or soil pack,Heat flux at bottom of snow or soil pack,Ground surface temperature,Heat flux at ground surface,Heat advected to the surface by liquid precipitation,Global solar radiation (horizontal)"; if(out_haz==true || out_soileb==false) { - fout << ",Global solar radiation on slope,Direct solar radiation on slope,Diffuse solar radiation on slope,Measured surface albedo,Relative humidity,Wind speed,Max wind speed at snow station or wind speed at ridge station,Wind direction at snow station,Precipitation rate at surface (solid only),Modelled snow depth (vertical),Enforced snow depth (vertical),Surface hoar size,24h Drift index (vertical),Height of new snow HN (24h vertical),3d sum of daily height of new snow (vertical),SWE (of snowpack),Eroded mass,Rain rate,Snowpack runoff (virtual lysimeter)"; + fout << ",Global solar radiation on slope,Direct solar radiation on slope,Diffuse solar radiation on slope,Measured surface albedo,Relative humidity,Wind speed,Max wind speed at snow station or wind speed at ridge station,Wind 
direction at snow station,Precipitation rate at surface (solid only),Modelled snow depth (vertical),Enforced snow depth (vertical),Surface hoar size,24h Drift index (vertical),Height of new snow HN (24h vertical),3d sum of daily height of new snow (vertical),SWE (of snowpack),Eroded mass,Rain rate,Snowpack runoff (virtual lysimeter -- snow only), Surface mass flux (virtual lysimeter)"; } else { - fout << ",Global solar radiation on slope,Direct solar radiation on slope,Diffuse solar radiation on slope,Measured surface albedo,Relative humidity,Wind speed,Max wind speed at snow station or wind speed at ridge station,Wind direction at snow station,Precipitation rate at surface (solid only),Modelled snow depth (vertical),Enforced snow depth (vertical),Internal energy change soil,Melt freeze part of internal energy change soil,Cold content soil,,SWE (of snowpack),Eroded mass,Rain rate,Snowpack runoff (virtual lysimeter)"; + fout << ",Global solar radiation on slope,Direct solar radiation on slope,Diffuse solar radiation on slope,Measured surface albedo,Relative humidity,Wind speed,Max wind speed at snow station or wind speed at ridge station,Wind direction at snow station,Precipitation rate at surface (solid only),Modelled snow depth (vertical),Enforced snow depth (vertical),Internal energy change soil,Melt freeze part of internal energy change soil,Cold content soil,,SWE (of snowpack),Eroded mass,Rain rate,Snowpack runoff (virtual lysimeter -- snow only), Surface mass flux (virtual lysimeter)"; } fout << ",Sublimation,Evaporation,Temperature 1 (modelled),Temperature 1 (measured),Temperature 2 (modelled),Temperature 2 (measured),Temperature 3 (modelled),Temperature 3 (measured),Temperature 4 (modelled),Temperature 4 (measured),Temperature 5 (modelled),Temperature 5 (measured)"; if (maxNumberMeasTemperatures == 5) { - fout << ",Solute load at soil surface,Measured snow depth HS,Liquid Water Content (of snowpack),Profile type,Stability class,z_Sdef,Deformation rate 
stability index Sdef,z_Sn38,Natural stability index Sn38,z_Sk38,Skier stability index Sk38,z_SSI,Structural Stability index SSI,z_S5,Stability index S5"; + fout << ",Solute load at soil surface,Measured snow depth HS,Liquid Water Content (of snowpack),Liquid Water Content (of soil),Solid Water Content (of soil),Profile type,Stability class,z_Sdef,Deformation rate stability index Sdef,z_Sn38,Natural stability index Sn38,z_Sk38,Skier stability index Sk38,z_SSI,Structural Stability index SSI,z_S5,Stability index S5"; if (useCanopyModel && out_canopy) { - fout << ",Interception storage,Canopy surface temperature,Canopy albedo,Wet fraction,Interception capacity,Net shortwave radiation absorbed by canopy,Net longwave radiation absorbed by canopy,Net radiation to canopy,Sensible heat flux to canopy,Latent heat flux to canopy,Transpiration of the canopy,Evaporation and sublimation of interception (liquid and frozen),Interception rate,Throughfall,Snow unload,Sensible heat flux to the surface (ground+canopy),Latent heat flux to the surface (ground+canopy),Longwave radiation up above canopy,Longwave radiation down above canopy"; - fout << ",Net longwave radiation to the surface (ground + canopy),Shortwave radiation up above canopy,Shortwave radiation down above canopy,Net shortwave radiation to the surface (ground + canopy),Total land surface albedo,Total net radiation to the surface (ground + canopy),Surface radiative temperature (ground + canopy),Precipitation Above Canopy,Evapotranspiration of the total surface (ground + canopy)"; + Canopy::DumpCanopyHeader(fout); } else { - fout << ",-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-"; + if (variant == "SEAICE" && Xdata.Seaice != NULL) { + fout << ",Total thickness,Ice thickness,Snow thickness,Snow thickness wrt reference,Freeboard,Sea level,Tot salinity,Average bulk salinity,Average Brine Salinity,Bottom Sal Flux,Top Sal Flux,MS_FLOODING,MS_ICEBASE_MELTING_FREEZING"; + fout << ",-,-,-,-,-,-,-,-,-,-,-,-,-,-,-"; + } 
else { + // 28 empty fields + fout << ",-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-,-"; + } } } else if (out_t) { size_t jj = 0; @@ -2145,11 +2309,11 @@ void AsciiIO::writeMETHeader(const SnowStation& Xdata, std::ofstream &fout) cons size_t i_prn; if (ii < fixedPositions.size()) { i_prn = ii + 1; - fout << ",Temperature " << i_prn << " (modelled)"; + fout << ",Temperature " << i_prn << " (modelled)"; } else { i_prn = (ii-fixedPositions.size())+1; fout << ",Hfr " << i_prn; - fout << ",Tfr " << i_prn << " (modelled)"; + fout << ",Tfr " << i_prn << " (modelled)"; jj++; } if (ii < numberMeasTemperatures) { @@ -2163,21 +2327,6 @@ void AsciiIO::writeMETHeader(const SnowStation& Xdata, std::ofstream &fout) cons } jj += 2; } - if (Xdata.tag_low) { - size_t tag = Xdata.tag_low; - while ((tag + numberFixedSensors) <= totNumberSensors) { - const size_t j_lim = ((tag + numberFixedSensors) <= numberMeasTemperatures)? 41 : 43; - if (jj < j_lim) { - fout << ",H(tag" << std::fixed << std::setfill('0') << std::setw(2) << tag << "),T(tag" << tag << ")"; - jj += 2; - if (numberFixedSensors < numberMeasTemperatures) { - fout << ",H(meas" << std::fixed << std::setfill('0') << std::setw(2) << tag << "),T(meas" << tag << ")"; - jj += 2; - } - tag++; - } - } - } for (; jj < 44; jj++) { fout << ",-"; } @@ -2187,7 +2336,7 @@ void AsciiIO::writeMETHeader(const SnowStation& Xdata, std::ofstream &fout) cons if (variant == "ANTARCTICA") { if (maxNumberMeasTemperatures == 5) - fout << ",Measured snow depth HS"; + fout << ",Measured snow depth HS"; fout << ",Internal energy change,Surface input (sum fluxes),Measured new snow density,Modeled new snow density,Erosion level (from srf),Running mean relative humidity (100h),Running mean wind speed (100h)"; } else if (variant == "CALIBRATION") { if (maxNumberMeasTemperatures == 5) @@ -2202,16 +2351,21 @@ void AsciiIO::writeMETHeader(const SnowStation& Xdata, std::ofstream &fout) cons } if(out_haz==true || out_soileb==false) { - fout << "\n,,W 
m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,1,degC,degC,degC,degC,W m-2,degC,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,1,%,m s-1,m s-1,deg,kg m-2 h-1,cm,cm,mm,cm,cm,cm,kg m-2,kg m-2 h-1,kg m-2 h-1,kg m-2,kg m-2,kg m-2,degC,degC,degC,degC,degC,degC,degC,degC,degC,degC"; + fout << "\n,,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,1,degC,degC,degC,degC,W m-2,degC,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,1,%,m s-1,m s-1,deg,kg m-2 h-1,cm,cm,mm,cm,cm,cm,kg m-2,kg m-2 h-1,kg m-2 h-1,kg m-2,kg m-2,kg m-2,kg m-2,degC,degC,degC,degC,degC,degC,degC,degC,degC,degC"; } else { - fout << "\n,,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,1,degC,degC,degC,degC,W m-2,degC,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,1,%,m s-1,m s-1,deg,kg m-2 h-1,cm,cm,kJ m-2,kJ m-2,MJ m-2,,kg m-2,kg m-2 h-1,kg m-2 h-1,kg m-2,kg m-2,kg m-2,degC,degC,degC,degC,degC,degC,degC,degC,degC,degC"; + fout << "\n,,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,1,degC,degC,degC,degC,W m-2,degC,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,1,%,m s-1,m s-1,deg,kg m-2 h-1,cm,cm,kJ m-2,kJ m-2,MJ m-2,kg m-2 h-1,kg m-2 h-1,kg m-2,kg m-2,kg m-2,kg m-2,kg m-2,degC,degC,degC,degC,degC,degC,degC,degC,degC,degC"; } if (maxNumberMeasTemperatures == 5) { fout << ",kg m-2,cm,kg m-2,-,-,cm,1,cm,1,cm,1,cm,1,cm,1"; if (out_canopy && useCanopyModel) { - fout << ",kg m-2,degC,-,-,kg m-2,W m-2,W m-2,W m-2,W m-2,W m-2,kg m-2 per timestep,kg m-2 per timestep,kg m-2 per timestep,kg m-2 per timestep,kg m-2 per timestep,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,W m-2,degC,kg m-2 per timestep,kg m-2 per timestep"; + Canopy::DumpCanopyUnits(fout); } else { - fout << ",,,,,,,,,,,,,,,,,,,,,,,,,,,,"; + if (variant == "SEAICE" && Xdata.Seaice != NULL) { + fout << ",m,m,m,m,m,m,g m-2,g kg-1,g kg-1,g m-2,g m-2"; + fout << ",,,,,,,,,,,,,,,,,"; + } else { + fout << ",,,,,,,,,,,,,,,,,,,,,,,,,,,,"; + } } } else if (out_t) { size_t jj = 0; @@ -2227,21 +2381,6 @@ void AsciiIO::writeMETHeader(const SnowStation& Xdata, std::ofstream &fout) cons jj++; } } - 
if (Xdata.tag_low) { - size_t tag = Xdata.tag_low; - while ((tag + numberFixedSensors) <= totNumberSensors) { - const size_t j_lim = ((tag + numberFixedSensors) <= numberMeasTemperatures)? 41 : 43; - if (jj < j_lim) { - fout << ",cm,degC"; - jj += 2; - if (numberFixedSensors < numberMeasTemperatures) { - fout << ",cm,degC"; - jj += 2; - } - tag++; - } - } - } for (; jj < 44; jj++) fout <<","; } else { @@ -2271,10 +2410,10 @@ void AsciiIO::writeProHeader(const SnowStation& Xdata, std::ofstream &fout) cons { const string stationname = Xdata.meta.getStationName(); fout << "[STATION_PARAMETERS]"; - fout << "\nStationName= " << stationname; - fout << "\nLatitude= " << std::fixed << std::setprecision(8) << Xdata.meta.position.getLat(); - fout << "\nLongitude= " << std::fixed << std::setprecision(8) << Xdata.meta.position.getLon(); - fout << "\nAltitude= " << std::fixed << std::setprecision(0) << Xdata.meta.position.getAltitude(); + fout << "\nStationName= " << stationname; + fout << "\nLatitude= " << std::fixed << std::setprecision(8) << Xdata.meta.position.getLat(); + fout << "\nLongitude= " << std::fixed << std::setprecision(8) << Xdata.meta.position.getLon(); + fout << "\nAltitude= " << std::fixed << std::setprecision(0) << Xdata.meta.position.getAltitude(); fout << "\nSlopeAngle= " << std::fixed << std::setprecision(2) << Xdata.meta.getSlopeAngle(); fout << "\nSlopeAzi= " << std::fixed << std::setprecision(2) << Xdata.meta.getAzimuth(); @@ -2282,16 +2421,27 @@ void AsciiIO::writeProHeader(const SnowStation& Xdata, std::ofstream &fout) cons if (out_haz) { // HACK To avoid troubles in A3D fout << "\n#" << info.computation_date.toString(Date::ISO) << ", Snowpack " << variant << " version " << info.version << " run by \"" << info.user << "\""; if (research_mode) - fout << " (research mode)"; + fout << " (research mode)"; else - fout << " (operational mode)"; + fout << " (operational mode)"; } fout << "\n0500,Date"; fout << "\n0501,nElems,height [> 0: top, < 0: bottom of 
elem.] (cm)"; fout << "\n0502,nElems,element density (kg m-3)"; fout << "\n0503,nElems,element temperature (degC)"; + if (prof_ID_or_MK == "ID") { + fout << "\n0504,nElems,element ID (1)"; + } else { + fout << "\n0504,nElems,element mk (1)"; + } + if (prof_AGE_or_DATE == "AGE") { + fout << "\n0505,nElems,element age (days)"; + } else { + fout << "\n0505,nElems,element deposition date (ISO)"; + } fout << "\n0506,nElems,liquid water content by volume (%)"; + if(enable_pref_flow) fout << "\n0507,nElems,liquid preferential flow water content by volume (%)"; fout << "\n0508,nElems,dendricity (1)"; fout << "\n0509,nElems,sphericity (1)"; fout << "\n0510,nElems,coordination number (1)"; @@ -2308,19 +2458,35 @@ void AsciiIO::writeProHeader(const SnowStation& Xdata, std::ofstream &fout) cons fout << "\n0521,nElems,thermal conductivity (W K-1 m-1)"; fout << "\n0522,nElems,absorbed shortwave radiation (W m-2)"; fout << "\n0523,nElems,viscous deformation rate (1.e-6 s-1)"; + if(enable_ice_reservoir) fout << "\n0524,nElems, ice reservoir volume fraction (%)"; + if(enable_ice_reservoir) fout << "\n0525,nElems, cumulated ice reservoir volume fraction (%)"; fout << "\n0530,8,position (cm) and minimum stability indices:"; fout << "\n profile type, stability class, z_Sdef, Sdef, z_Sn38, Sn38, z_Sk38, Sk38"; fout << "\n0531,nElems,deformation rate stability index Sdef"; fout << "\n0532,nElems,natural stability index Sn38"; fout << "\n0533,nElems,stability index Sk38"; - fout << "\n0534,nElems,hand hardness either (N) or index steps (1)"; + if (r_in_n) { + fout << "\n0534,nElems,hand hardness in Newton (N)"; + } else { + fout << "\n0534,nElems,hand hardness in index steps (1)"; + } fout << "\n0535,nElems,optical equivalent grain size (mm)"; + if (Xdata.Seaice != NULL) { + fout << "\n0540,nElems,bulk salinity (g/kg)"; + fout << "\n0541,nElems,brine salinity (g/kg)"; + } + if (useRichardsEq && Xdata.meta.getSlopeAngle() > 0.) 
fout << "\n0560,nElems,potential_lateral_flow_rate_(kg/m3)"; fout << "\n0601,nElems,snow shear strength (kPa)"; fout << "\n0602,nElems,grain size difference (mm)"; fout << "\n0603,nElems,hardness difference (1)"; fout << "\n0604,nElems,ssi"; fout << "\n0605,nElems,inverse texture index ITI (Mg m-4)"; fout << "\n0606,nElems,critical cut length (m)"; + if (metamorphism_model == "NIED") { + fout << "\n0621,nElems,dry snow metamorphism factor (dsm)"; + fout << "\n0622,nElems,Sigdsm"; + fout << "\n0623,nElems,S_dsm"; + } if (variant == "CALIBRATION") { fout << "\n0701,nElems,SNOWPACK: total settling rate (% h-1)"; fout << "\n0702,nElems,SNOWPACK: settling rate due to load (% h-1)"; @@ -2331,6 +2497,15 @@ void AsciiIO::writeProHeader(const SnowStation& Xdata, std::ofstream &fout) cons fout << "\n0892,nElems,SNTHERM: settling rate due to metamorphism (% h-1)"; fout << "\n0893,nElems,SNTHERM: viscosity (GPa s)"; } + + if(enable_vapour_transport) { + fout << "\n0901,nElems, the degree of undersaturation, (rhov-rohv_sat)/rhov_sat (-)"; + fout << "\n0902,nElems, the water vapor diffusion flux (kg m-2 s-1)"; + fout << "\n0903,nElems, the cumulative density change due to water vapor transport (kg m-3)"; + fout << "\n0904,nElems, the snow density change rate due to water vapor transport (1.0e-6 kg m-3)"; + fout << "\n0905,nElems, the element tracking for comparison, (-)"; + } + fout << "\n\n[DATA]"; } @@ -2375,13 +2550,13 @@ bool AsciiIO::checkHeader(const SnowStation& Xdata, const std::string& filename, return false; } else { std::ofstream fout; - fout.open(filename.c_str(), std::ios::out); + fout.open(filename.c_str(), std::ios::out); if (fout.fail()) return false; if (ext=="err") { fout << "[SNOWPACK_ERROR_LOG]\n"; - fout << " RUNTIME : STN LOC LINE MSG [JULIAN]"; + fout << " RUNTIME : STN LOC LINE MSG [JULIAN]"; } else if (ext=="met") { writeMETHeader(Xdata, fout); } else if (ext=="pro") { @@ -2402,52 +2577,3 @@ bool AsciiIO::writeHazardData(const std::string& /*stationID*/, 
const std::vecto { throw IOException("Nothing implemented here!", AT); } - -/** - * @brief Reads labels and dates from file for tagging - * @author Thomas Egger - * @param TAGdata - * @param filename Filename to read from - * @param Mdata To pass zv_ts[] values for initialization - */ -void AsciiIO::readTags(const std::string& filename, const CurrentMeteo& Mdata, TaggingData& TAGdata) -{ - Config tagging_config(filename); - tagging_config.getValue("NUMBER_TAGS", numberTags); //HACK: numberTags should be a member of TAGdata? - tagging_config.getValue("TAG_LOW", TAGdata.tag_low); - tagging_config.getValue("TAG_TOP", TAGdata.tag_top); - tagging_config.getValue("REPOS_LOW", TAGdata.repos_low); - tagging_config.getValue("REPOS_TOP", TAGdata.repos_top); - - totNumberSensors += numberTags; - - TAGdata.tag_low = std::max((size_t)1, std::min(TAGdata.tag_low, numberTags)); - TAGdata.tag_top = std::min(TAGdata.tag_top, numberTags); - TAGdata.repos_low = std::max((size_t)1, TAGdata.repos_low); - TAGdata.repos_top = std::min(TAGdata.repos_top, numberTags); - - TAGdata.resize(numberTags + 1); - TAGdata.useSoilLayers = useSoilLayers; - - for (size_t tag=1; tag<=numberTags; tag++) { - stringstream ss; - ss << setw(2) << setfill('0') << tag; - - tagging_config.getValue("LABEL_" + ss.str(), TAGdata.tags[tag-1].label); - - string date_string; - tagging_config.getValue("DATE_" + ss.str(), date_string); - IOUtils::convertString(TAGdata.tags[tag-1].date, date_string, time_zone); - - if ( (tag >= TAGdata.repos_low) && (tag <= TAGdata.repos_top) ) { - const size_t depth = fixedPositions.size() + tag - 1; - if (Mdata.zv_ts.size() > depth) { - TAGdata.tags[tag-1].previous_depth = Mdata.zv_ts[depth]; - } else { //HACK: can I do this? does this make sense? 
- TAGdata.tags[tag-1].previous_depth = IOUtils::nodata; - } - } else { - TAGdata.tags[tag-1].previous_depth = IOUtils::nodata; - } - } -} diff --git a/third_party/snowpack/plugins/AsciiIO.h b/third_party/snowpack/plugins/AsciiIO.h index a2d69f80..c0765e66 100644 --- a/third_party/snowpack/plugins/AsciiIO.h +++ b/third_party/snowpack/plugins/AsciiIO.h @@ -21,8 +21,8 @@ #ifndef ASCIIIO_H #define ASCIIIO_H -#include "SnowpackIOInterface.h" #include +#include "SnowpackIOInterface.h" class AsciiIO : public SnowpackIOInterface { @@ -33,7 +33,7 @@ class AsciiIO : public SnowpackIOInterface { virtual bool snowCoverExists(const std::string& i_snowfile, const std::string& stationID) const; virtual void readSnowCover(const std::string& i_snowfile, const std::string& stationID, - SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata); + SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata, const bool& read_salinity); virtual void writeSnowCover(const mio::Date& date, const SnowStation& Xdata, const ZwischenData& Zdata, const bool& forbackup=false); @@ -96,10 +96,8 @@ class AsciiIO : public SnowpackIOInterface { const double dhs_corr, const double mass_corr, const size_t nCalcSteps, std::ofstream &fout); - void readTags(const std::string& filename, const CurrentMeteo& Mdata, TaggingData& TAGdata); - std::set setAppendableFiles; - std::string variant, experiment, sw_mode; + std::string metamorphism_model, variant, experiment, sw_mode; std::string inpath, snowfile, i_snowpath, outpath, o_snowpath; const RunInfo info; @@ -114,9 +112,14 @@ class AsciiIO : public SnowpackIOInterface { double time_zone; // time zone of input double calculation_step_length, hazard_steps_between, ts_days_between; double min_depth_subsurf, hoar_density_surf, hoar_min_size_surf; + bool useRichardsEq, enable_pref_flow, enable_ice_reservoir; + bool enable_vapour_transport; bool avgsum_time_series, useCanopyModel, useSoilLayers, research_mode, perp_to_slope; + bool useReferenceLayer; //Whether or not the output should be 
referenced to the marked reference layer (i.e., the layer with int(mk/1000)==9). bool out_heat, out_lw, out_sw, out_meteo, out_haz, out_mass, out_t, out_load, out_stab, out_canopy, out_soileb; bool r_in_n; + std::string prof_ID_or_MK; + std::string prof_AGE_or_DATE; static const bool t_srf, t_gnd; }; diff --git a/third_party/snowpack/plugins/CMakeLists.txt b/third_party/snowpack/plugins/CMakeLists.txt index 940c0a58..e9aa82e5 100644 --- a/third_party/snowpack/plugins/CMakeLists.txt +++ b/third_party/snowpack/plugins/CMakeLists.txt @@ -19,8 +19,7 @@ ENDIF(PLUGIN_IMISIO) IF(PLUGIN_CAAMLIO) SET(plugins_sources ${plugins_sources} plugins/CaaMLIO.cc) - FIND_PACKAGE(LibXml2 REQUIRED) - INCLUDE_DIRECTORIES(SYSTEM ${LIBXML2_INCLUDE_DIR}) - LINK_DIRECTORIES(${LIBXML2_LIBRARIES}) - SET(extra_libs ${extra_libs} ${LIBXML2_LIBRARIES}) + SET(plugins_sources ${plugins_sources} plugins/pugixml/pugixml.cpp) + set_source_files_properties(plugins/pugixml/pugixml.cpp PROPERTIES COMPILE_FLAGS "-w") ENDIF(PLUGIN_CAAMLIO) + diff --git a/third_party/snowpack/plugins/CaaMLIO.cc b/third_party/snowpack/plugins/CaaMLIO.cc index 0bb053f4..4c4bbd66 100644 --- a/third_party/snowpack/plugins/CaaMLIO.cc +++ b/third_party/snowpack/plugins/CaaMLIO.cc @@ -1,1217 +1,1715 @@ -/***********************************************************************************/ -/* Copyright 2014 WSL Institute for Snow and Avalanche Research SLF-DAVOS */ -/***********************************************************************************/ -/* This file is part of Snowpack. - MeteoIO is free software: you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - MeteoIO is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with MeteoIO. If not, see . -*/ -#include "CaaMLIO.h" -#include "../Utils.h" -//#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#if !defined(LIBXML_XPATH_ENABLED) - #error Please enable XPATH in your version of libxml! -#endif -#if !defined(LIBXML_SAX1_ENABLED) - #error Please enable SAX1 in your version of libxml! -#endif -#if !defined(LIBXML_TREE_ENABLED) - #error Please enable TREE in your version of libxml! -#endif - -using namespace std; -using namespace mio; - -/** - * @page caaml CAAML - * @section caaml_format Format - * This plugin reads the CAAML files as generated according
CAAML V5.0's - * specification. - * - * @section caaml_keywords Keywords - * This plugin uses the following keywords: - * - COORDSYS: input coordinate system (see Coords) specified in the [Input] section - * - SNOW: specify COSMOCAAML for [Input] section - * - SNOWPATH: string containing the path to the xml files to be read, specified in the [Input] section - * - SNOWFILE: specify the xml file to read the data from (optional) - * - SNOW_PREFIX: file name prefix appearing before the date (optional) - * - SNOW_EXT: file extension (default: ".xml", give "none" to get an empty string) - * - STATION#: ID of the station to read - * - IMIS_STATIONS: if set to true, all station IDs provided above will be stripped of their number (to match MeteoCH naming scheme) - * - USE_MODEL_LOC: if set to false, the true station location (lat, lon, altitude) is used. Otherwise, it uses the model location (default) - * - XML_ENCODING: force the input file encoding, overriding the file's own encoding declaration (optional, see \ref caaml_encoding "XML encoding" below) - * - * If no SNOWFILE is provided, all "*.caaml" files in the SNOWPATH directory will be read, if they match the SNOW_PREFIX and SNOW_EXT. - * They must contain the date of the first data formatted as ISO8601 numerical UTC date in their file name. For example, a file containing simulated - * meteorological fields from 2014-03-03T12:00 until 2014-03-05T00:00 could be named such as "cosmo_201403031200.xml" - * If some numbers appear before the numerical date, they must be provided as part of SNOW_PREFIX so the plugin can - * properly extract the date (for MeteoSwiss, this must be set to "VNMH49"). - * - * Example: - * @code - * [Input] - * COORDSYS = CH1903 - * SNOW = CAAML - * SNOWPATH = ./input/snowCAAMLdata - * SNOWFILE = 5WJ_20120229.caaml - * @endcode - * - * @subsection caaml_encoding XML encoding - * Each XML document should specify its encoding. 
However this information might sometimes be missing or even worse, be false. This makes the XML document non-compliant. - * Normally, CAAML reads the file encoding in the file itself. If this does not work (one of the two cases given above), it is possible to force the - * encoding of the input file by using the "XML_ENCODING" option. This option takes one of the following values - * ("LE" stands for "Little Endian" and "BE" for "Big Endian"): - * - for UTF/UCS: UTF-8, UTF-16-LE, UTF-16-BE, UCS-4-LE, UCS-4-BE, UCS-4-2143, UCS-4-3412, UCS-2, EBCDIC - * - for ISO-8859: ISO-8859-1, ISO-8859-2, ISO-8859-3, ISO-8859-4, ISO-8859-5, ISO-8859-6, ISO-8859-7, ISO-8859-8, ISO-8859-9 - * - for Japanses: ISO-2022-JP, SHIFT-JIS, EUC-JP - * - for ascii: ASCII - - */ - -//Define namespaces and abbreviations -const xmlChar* CaaMLIO::xml_ns_caaml = (const xmlChar*) "http://caaml.org/Schemas/V5.0/Profiles/SnowProfileIACS"; -const xmlChar* CaaMLIO::xml_ns_abrev_caaml = (const xmlChar*) "caaml"; -const xmlChar* CaaMLIO::xml_ns_gml = (const xmlChar*) "http://www.opengis.net/gml"; -const xmlChar* CaaMLIO::xml_ns_abrev_gml = (const xmlChar*) "gml"; -const xmlChar* CaaMLIO::xml_ns_xsi = (const xmlChar*) "http://www.w3.org/2001/XMLSchema-instance"; -const xmlChar* CaaMLIO::xml_ns_abrev_xsi = (const xmlChar*) "xsi"; -const xmlChar* CaaMLIO::xml_ns_slf = (const xmlChar*) "http://www.slf.ch/snowprofile/1.0"; -const xmlChar* CaaMLIO::xml_ns_abrev_slf = (const xmlChar*) "slf"; -const xmlChar* CaaMLIO::xml_ns_snp = (const xmlChar*) "http://www.slf.ch/snowpack/1.0"; -const xmlChar* CaaMLIO::xml_ns_abrev_snp = (const xmlChar*) "snp"; -// const std::string xml_schemaLocation_snp = "http://www.slf.ch/snowpack/snowpack.xsd"; -const std::string namespaceCAAML = "caaml"; -const std::string namespaceSNP = "snp"; -//Define paths in xml-file -const std::string CaaMLIO::TimeData_xpath = "/caaml:SnowProfile/caaml:validTime"; -const std::string CaaMLIO::StationMetaData_xpath = 
"/caaml:SnowProfile/caaml:locRef/caaml:ObsPoint"; -const std::string CaaMLIO::SnowData_xpath = "/caaml:SnowProfile/caaml:snowProfileResultsOf/caaml:SnowProfileMeasurements"; - -CaaMLIO::CaaMLIO(const SnowpackConfig& cfg, const RunInfo& run_info) - : info(run_info), - i_snowpath(), sw_mode(), o_snowpath(), experiment(), - useSoilLayers(false), perp_to_slope(false), aggregate_caaml(false), in_tz(), - snow_prefix(), snow_ext(".caaml"), caaml_nodata(-999.), - in_doc(NULL), in_xpathCtx(NULL), in_encoding(XML_CHAR_ENCODING_NONE) -{ - init(cfg); -} - -void CaaMLIO::init(const SnowpackConfig& cfg) -{ - std::string tmpstr; - - LIBXML_TEST_VERSION //check lib versions and call xmlInitParser() - - cfg.getValue("SW_MODE", "Snowpack", sw_mode); - cfg.getValue("SNP_SOIL", "Snowpack", useSoilLayers); - cfg.getValue("PERP_TO_SLOPE", "SnowpackAdvanced", perp_to_slope); - cfg.getValue("TIME_ZONE", "Input", in_tz); - - cfg.getValue("SNOW_EXT", "INPUT", snow_ext); - // if ( IOUtils::strToUpper(snow_ext)=="NONE" ) snow_ext=""; - cfg.getValue("METEOPATH", "Input", tmpstr); - cfg.getValue("SNOWPATH", "Input", i_snowpath); - if (i_snowpath.empty()) - i_snowpath = tmpstr; - - cfg.getValue("AGGREGATE_CAAML", "Output", aggregate_caaml); - cfg.getValue("EXPERIMENT", "Output", experiment); - cfg.getValue("METEOPATH", "Output", tmpstr); - cfg.getValue("SNOWPATH", "Output", o_snowpath); - if (o_snowpath.empty()) - o_snowpath = tmpstr; - - //input encoding forcing, inherited from CosmoXMLIO - tmpstr.clear(); - cfg.getValue("XML_ENCODING", "INPUT", tmpstr); - if (!tmpstr.empty()) { - if (tmpstr=="UTF-8") in_encoding=XML_CHAR_ENCODING_UTF8; - else if (tmpstr=="UTF-16-LE") in_encoding=XML_CHAR_ENCODING_UTF16LE; - else if (tmpstr=="UTF-16-BE") in_encoding=XML_CHAR_ENCODING_UTF16BE; - else if (tmpstr=="UCS-4-LE") in_encoding=XML_CHAR_ENCODING_UCS4LE; - else if (tmpstr=="UCS-4-BE") in_encoding=XML_CHAR_ENCODING_UCS4BE; - else if (tmpstr=="EBCDIC") in_encoding=XML_CHAR_ENCODING_EBCDIC; - else if 
(tmpstr=="UCS-4-2143") in_encoding=XML_CHAR_ENCODING_UCS4_2143; - else if (tmpstr=="UCS-4-3412") in_encoding=XML_CHAR_ENCODING_UCS4_3412; - else if (tmpstr=="UCS-2") in_encoding=XML_CHAR_ENCODING_UCS2; - else if (tmpstr=="ISO-8859-1") in_encoding=XML_CHAR_ENCODING_8859_1; - else if (tmpstr=="ISO-8859-2") in_encoding=XML_CHAR_ENCODING_8859_2; - else if (tmpstr=="ISO-8859-3") in_encoding=XML_CHAR_ENCODING_8859_3; - else if (tmpstr=="ISO-8859-4") in_encoding=XML_CHAR_ENCODING_8859_4; - else if (tmpstr=="ISO-8859-5") in_encoding=XML_CHAR_ENCODING_8859_5; - else if (tmpstr=="ISO-8859-6") in_encoding=XML_CHAR_ENCODING_8859_6; - else if (tmpstr=="ISO-8859-7") in_encoding=XML_CHAR_ENCODING_8859_7; - else if (tmpstr=="ISO-8859-8") in_encoding=XML_CHAR_ENCODING_8859_8; - else if (tmpstr=="ISO-8859-9") in_encoding=XML_CHAR_ENCODING_8859_9; - else if (tmpstr=="ISO-2022-JP") in_encoding=XML_CHAR_ENCODING_2022_JP; - else if (tmpstr=="SHIFT-JIS") in_encoding=XML_CHAR_ENCODING_SHIFT_JIS; - else if (tmpstr=="EUC-JP") in_encoding=XML_CHAR_ENCODING_EUC_JP; - else if (tmpstr=="ASCII") in_encoding=XML_CHAR_ENCODING_ASCII; - else - throw InvalidArgumentException("Encoding \""+tmpstr+"\" is not supported!", AT); - } -} - -CaaMLIO& CaaMLIO::operator=(const CaaMLIO& source) { - if (this != &source) { - caaml_nodata = source.caaml_nodata; - in_doc = NULL; - in_xpathCtx = NULL; - } - return *this; -} - -CaaMLIO::~CaaMLIO() throw() -{ - closeIn_CAAML(); -} - -void CaaMLIO::openIn_CAAML(const std::string& in_snowfile) -{ -// if (in_doc!=NULL) return; //the file has already been read - xmlInitParser(); - xmlKeepBlanksDefault(0); - - if (in_encoding==XML_CHAR_ENCODING_NONE) { - in_doc = xmlParseFile(in_snowfile.c_str()); - } else { - const xmlParserCtxtPtr ctxt( xmlCreateFileParserCtxt( in_snowfile.c_str() ) ); - xmlSwitchEncoding( ctxt, in_encoding); - xmlParseDocument( ctxt); - in_doc = ctxt->myDoc; - } - - if (in_xpathCtx != NULL) xmlXPathFreeContext(in_xpathCtx); //free variable if this was 
not freed before - in_xpathCtx = xmlXPathNewContext(in_doc); - if (in_xpathCtx == NULL) { - closeIn_CAAML(); - throw IOException("Unable to create new XPath context", AT); - } - - if (xmlXPathRegisterNs(in_xpathCtx, xml_ns_abrev_caaml, xml_ns_caaml) != 0) { - throw IOException("Unable to register namespace with prefix", AT); - } - - if (xmlXPathRegisterNs(in_xpathCtx, xml_ns_abrev_slf, xml_ns_slf) != 0) { - throw IOException("Unable to register namespace with prefix", AT); - } - - if (xmlXPathRegisterNs(in_xpathCtx, xml_ns_abrev_snp, xml_ns_snp) != 0) { - throw IOException("Unable to register namespace with prefix", AT); - } -} - -void CaaMLIO::closeIn_CAAML() throw() -{ - if (in_xpathCtx!=NULL) { - xmlXPathFreeContext(in_xpathCtx); - in_xpathCtx = NULL; - } - if (in_doc!=NULL) { - xmlFreeDoc(in_doc); - in_doc = NULL; - } - xmlCleanupParser(); -} - -/** - * @brief This routine checks if the specified snow cover data exists - * @param i_snowfile file containing the initial state of the snowpack - * @param stationID - * @return true if the file exists - */ -bool CaaMLIO::snowCoverExists(const std::string& i_snowfile, const std::string& /*stationID*/) const -{ - std::string snofilename( getFilenamePrefix(i_snowfile, i_snowpath, false) ); - - if (snofilename.rfind(".caaml") == string::npos) { - snofilename += ".caaml"; - } - - return FileUtils::fileExists(snofilename); -} - -/** - * @brief This routine reads the status of the snow cover at program start - * @param i_snowfile file containing the initial state of the snowpack - * @param stationID - * @param SSdata - * @param Zdata - */ -void CaaMLIO::readSnowCover(const std::string& i_snowfile, const std::string& stationID, - SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata) -{ - std::string snofilename( getFilenamePrefix(i_snowfile, i_snowpath, false) ); - std::string hazfilename(snofilename); - - if (snofilename.rfind(".caaml") == string::npos) { - snofilename += ".caaml"; - hazfilename += ".haz"; - } else { - 
hazfilename.replace(hazfilename.rfind(".caaml"), 6, ".haz"); - } - - read_snocaaml(snofilename, stationID, SSdata); - Zdata.reset(); -} - -// complete filename_prefix -std::string CaaMLIO::getFilenamePrefix(const std::string& fnam, const std::string& path, const bool addexp) const -{ - //TODO: read only once (in constructor) - std::string filename_prefix( path + "/" + fnam ); - - if (addexp && (experiment != "NO_EXP")) - filename_prefix += "_" + experiment; - - return filename_prefix; -} - -//Read CAAML file -bool CaaMLIO::read_snocaaml(const std::string& in_snowFilename, const std::string& stationID, SN_SNOWSOIL_DATA& SSdata) -{ - // Read CAAML snow profile file - openIn_CAAML(in_snowFilename); - - //Read profile date - SSdata.profileDate = xmlGetDate(); - - //Read station metadata - SSdata.meta = xmlGetStationData(stationID); - - //Snow-Soil properties: set to default if not available in file - setCustomSnowSoil(SSdata); - - //Read quantity profiles - std::list xpaths; - xpaths.push_back("/caaml:tempProfile/caaml:Obs"); - xpaths.push_back("/caaml:densityProfile/caaml:Layer"); - xpaths.push_back("/caaml:hardnessProfile/caaml:Layer"); - std::vector > depths( xpaths.size() ); //store 3 profiles: obs, density and hardness - std::vector > val( xpaths.size() ); - - //Loop on the paths to read corresponding profile - size_t jj = 0; - for (std::list::iterator path=xpaths.begin(); path!=xpaths.end(); path++, jj++) { - getProfiles(*path, depths[jj], val[jj]); - } - - //Read layers - const xmlNodeSetPtr data( xmlGetData(SnowData_xpath+"/caaml:stratProfile/caaml:Layer") ); - - SSdata.nLayers = static_cast( data->nodeNr ); - SSdata.Ldata.resize(SSdata.nLayers, LayerData()); - - //Loop on the layer nodes to set their properties - jj = 0; - if (SSdata.nLayers>0) { - const bool reverse = getLayersDir(); //Read profile direction - if (!reverse) { - for (size_t ii = 0; ii < SSdata.nLayers; ii++, jj++) - SSdata.Ldata[jj] = xmlGetLayer(data->nodeTab[ii]); - } else { - for (size_t ii 
= SSdata.nLayers; ii-- > 0; jj++) - SSdata.Ldata[jj] = xmlGetLayer(data->nodeTab[ii]); - } - } - - //Set temperature, density and hardness from the profiles - setProfileVal(SSdata.Ldata, depths, val); - - //Layer default values - for (size_t ii = 0; ii < SSdata.nLayers; ii++) { - //Layer properties: set to default if not available in file - setCustomLayerData(SSdata.Ldata[ii]); - SSdata.Ldata[ii].phiVoids = 1. - SSdata.Ldata[ii].phiSoil - SSdata.Ldata[ii].phiWater - SSdata.Ldata[ii].phiIce; - } - - //Set deposition date from the layers - setDepositionDates(SSdata.Ldata,SSdata.profileDate); - - //Compute total number of layers and height - SSdata.nN = 1; - SSdata.Height = 0.; - for (size_t ii = 0; ii < SSdata.nLayers; ii++) { - SSdata.nN += SSdata.Ldata[ii].ne; - SSdata.Height += SSdata.Ldata[ii].hl; - } - SSdata.HS_last = SSdata.Height; - - closeIn_CAAML(); - - return true; -} - -xmlNodeSetPtr CaaMLIO::xmlGetData(const std::string& path) -{ - const xmlXPathObjectPtr xpathObj( xmlXPathEvalExpression((const xmlChar*)path.c_str(),in_xpathCtx) ); - if (xpathObj == NULL) { - throw NoDataException("Invalid xpath expression: '"+path+"'", AT); - } - - xmlNodeSetPtr &data = xpathObj->nodesetval; - if (xmlXPathNodeSetIsEmpty(data) || data->nodeNr==0) { - xmlXPathFreeObject(xpathObj); - throw NoDataException("No data found for '"+path+"'", AT); - } - - return data; -} - -Date CaaMLIO::xmlGetDate() -{ - const xmlXPathObjectPtr xpathObj( xmlXPathEvalExpression((const xmlChar*)TimeData_xpath.c_str(),in_xpathCtx) ); - if (xpathObj == NULL) { - throw NoDataException("Invalid xpath expression: '"+TimeData_xpath+"'", AT); - } - - if (xmlXPathNodeSetIsEmpty(xpathObj->nodesetval)) { - xmlXPathFreeObject(xpathObj); - throw NoDataException("No data found for '"+TimeData_xpath+"'", AT); - } - - const std::string date_str( (char*) xmlNodeGetContent(xpathObj->nodesetval->nodeTab[0]) ); - - Date date; - IOUtils::convertString(date, date_str, in_tz); - return date; -} - -StationData 
CaaMLIO::xmlGetStationData(const std::string& stationID) -{ - double x=IOUtils::nodata, y=IOUtils::nodata, z=IOUtils::nodata; - double slopeAngle=IOUtils::nodata, azimuth=IOUtils::nodata; - std::string stationName; - - const xmlNodeSetPtr data( xmlGetData(StationMetaData_xpath) ); - for (xmlNode *cur_c = data->nodeTab[0]->children; cur_c; cur_c = cur_c->next) { - if (cur_c->type != XML_TEXT_NODE) { - const std::string field_name( (const char*)cur_c->name ); - //Ignore some fields - if (field_name!="customData" && field_name!="comment" && field_name!="metaDataProperty") { - if (field_name=="name") { - stationName = std::string((const char*)xmlNodeGetContent(cur_c)); - } else if (field_name=="validElevation") { - sscanf((const char*)xmlNodeGetContent(cur_c),"%lf",&z); - } else if (field_name=="validSlopeAngle") { - sscanf((const char*)xmlNodeGetContent(cur_c),"%lf",&slopeAngle); - } else if (field_name=="validAspect") { - azimuth = IOUtils::bearing( string((const char*)xmlNodeGetContent(cur_c)) ); - } else if (field_name=="pointLocation") { - sscanf((const char*)xmlNodeGetContent(cur_c),"%lf %lf",&x,&y); - } - } - } - } - - Coords tmppos; - tmppos.setLatLon(x, y, z); - StationData metatmp(tmppos, stationID, stationName); - metatmp.setSlope(slopeAngle, azimuth); - return metatmp; -} - -double CaaMLIO::xmlSetVal(const string& xpath, const string& property, const double& dflt) -{ - const std::string path( SnowData_xpath+xpath+":"+property ); - const xmlXPathObjectPtr xpathObj( xmlXPathEvalExpression((const xmlChar*)path.c_str(), in_xpathCtx) ); - double val = IOUtils::nodata; - - if (xpathObj->nodesetval->nodeNr > 0) - sscanf((const char*)xmlNodeGetContent(xpathObj->nodesetval->nodeTab[0]), "%lf", &val); - else - val = dflt; - - xmlXPathFreeObject(xpathObj); - return val; -} - -int CaaMLIO::xmlSetVal(const string& xpath, const std::string& property, const int& dflt) -{ - const std::string path( SnowData_xpath+xpath+":"+property ); - const xmlXPathObjectPtr xpathObj( 
xmlXPathEvalExpression((const xmlChar*)path.c_str(), in_xpathCtx) ); - int val = IOUtils::inodata; - - if (xpathObj->nodesetval->nodeNr > 0) - sscanf((const char*)xmlNodeGetContent(xpathObj->nodesetval->nodeTab[0]), "%d", &val); - else - val = dflt; - - xmlXPathFreeObject(xpathObj); - return val; -} - -void CaaMLIO::setCustomSnowSoil(SN_SNOWSOIL_DATA& Xdata) -{ - const std::string xpath( "/caaml:customData/snp" ); - Xdata.Albedo = xmlSetVal(xpath,"Albedo",0.6); - Xdata.SoilAlb = xmlSetVal(xpath,"SoilAlb",0.2); - Xdata.BareSoil_z0 = xmlSetVal(xpath,"BareSoil_z0",0.02); - Xdata.Canopy_Height = xmlSetVal(xpath,"CanopyHeight",0.); - Xdata.Canopy_LAI = xmlSetVal(xpath,"CanopyLAI",0.); - Xdata.Canopy_BasalArea = xmlSetVal(xpath,"CanopyBasalArea",0.); - Xdata.Canopy_Direct_Throughfall = xmlSetVal(xpath,"CanopyDirectThroughfall",1.); - Xdata.WindScalingFactor = xmlSetVal(xpath,"WindScalingFactor",1.); - Xdata.ErosionLevel = xmlSetVal(xpath,"ErosionLevel",0); - Xdata.TimeCountDeltaHS = xmlSetVal(xpath,"TimeCountDeltaHS",0.); -} - -//Direction in which the layers should be read and stored in SSdata -bool CaaMLIO::getLayersDir() -{ - const xmlXPathObjectPtr xpathObj( xmlXPathEvalExpression((const xmlChar*)SnowData_xpath.c_str(),in_xpathCtx) ); - const std::string direction( (const char*)xmlGetProp(xpathObj->nodesetval->nodeTab[0],(const xmlChar*)"dir") ); - - return (direction!="bottom up"); //standard direction -> false, otherwise "true" for Reverse direction -} - -LayerData CaaMLIO::xmlGetLayer(xmlNodePtr cur) -{ - std::string code; - - LayerData Layer; - if (cur->type == XML_ELEMENT_NODE) { - //Loop on the children - for (xmlNode *cur_c = cur->children; cur_c; cur_c = cur_c->next) { - if (cur_c->type != XML_TEXT_NODE) { - const std::string field_name( (const char*)cur_c->name ); - //Ignore some fields - if (field_name!="customData" && field_name!="comment" && field_name!="metaDataProperty") { - //Default reading - if (field_name!="grainSize") { - const xmlChar* unit; - if 
(strcmp((const char*)cur_c->ns->prefix,"slf")) { - unit = (const xmlChar*) "uom"; - } else { - unit = (const xmlChar*) "unit"; - } - if (!strcmp((const char*) cur_c->name, "depthTop")) { - double z; - sscanf((const char*) xmlNodeGetContent(cur_c),"%lf",&z); - } else if (!strcmp((const char*) cur_c->name, "thickness")) { - double temp; - sscanf((const char*) xmlNodeGetContent(cur_c),"%lf",&temp); - Layer.hl = unitConversion(temp,(char*)xmlGetProp(cur_c,unit),(char*)"m"); - Layer.ne = (size_t) ceil(Layer.hl/0.02); - } else if (!strcmp((const char*) cur_c->name, "hardness")) { - //const double hard = hardness_codeToVal((char*) xmlNodeGetContent(cur_c)); - } else if (!strcmp((const char*) cur_c->name, "lwc")) { - Layer.phiWater = lwc_codeToVal((char*) xmlNodeGetContent(cur_c)); - } else if (!strcmp((const char*) cur_c->name, "grainFormPrimary")) { - //code = (char*) xmlNodeGetContent(cur_c); - code = std::string( (char*)xmlNodeGetContent(cur_c) ); - grainShape_codeToVal(code, Layer.sp, Layer.dd, Layer.mk); - } - //Treating "grainSize" field - } else { - for (xmlNode *cur_cc = cur_c->children; cur_cc; cur_cc = cur_cc->next) { - if (cur_cc->type != XML_TEXT_NODE) { - for (xmlNode *cur_ccc = cur_cc->children; cur_ccc; cur_ccc = cur_ccc->next) { - if (cur_ccc->type != XML_TEXT_NODE) { - if (!strcmp((const char*) cur_ccc->name, "avg")) { - sscanf((const char*) xmlNodeGetContent(cur_ccc),"%lf",&Layer.rg); - Layer.rg = unitConversion(Layer.rg,(char*)xmlGetProp(cur_c,(const xmlChar*)"uom"),(char*)"mm")/2.; - Layer.rb = Layer.rg/4.; - } - } - } - } - } - } - } - } - } - } else { - cur = cur->next; - } - - if (Layer.rg == 0.) 
{ - if (code=="IF") { - Layer.rg = 3./2.; - Layer.rb = 3./8.; - } else { - throw IOException("Grain size missing for a non-ice layer!", AT); - } - } - - return Layer; -} - -void CaaMLIO::getProfiles(const std::string path, std::vector &depths, std::vector &val) -{ - const xmlNodeSetPtr data( xmlGetData(SnowData_xpath+path) ); - const size_t nrElem = static_cast(data->nodeNr); - depths.resize(nrElem); - val.resize(nrElem); - - //double l; - //Loop on the nodes - for (size_t ii=0; iinodeTab[ii]->type == XML_ELEMENT_NODE) { - //Loop on the children - for (xmlNode *cur_c = data->nodeTab[ii]->children; cur_c; cur_c = cur_c->next) { - if (cur_c->type != XML_TEXT_NODE) { - const std::string field_name( (const char*)cur_c->name ); - //Ignore some fields - if (field_name!="customData" && field_name!="comment" && field_name!="metaDataProperty") { - std::string name( field_name ); - if (name.compare(0,4,"snow")==0) name.erase(0,4); - - if (!name.empty()) name[0] = (const char)std::toupper( name[0] ); - const std::string unitname( "uom"+name ); - - if (name=="Temp" || name=="Density" || name=="Hardness") { - sscanf((const char*) xmlNodeGetContent(cur_c), "%lf", &val[ii]); - if (name=="Temp") - val[ii] = unitConversion(val[ii],(char*)xmlGetProp(data->nodeTab[ii]->parent,(const xmlChar*)unitname.c_str()),(char*)"K"); - } else if (name.compare(0,5,"Depth")==0) { - sscanf((const char*) xmlNodeGetContent(cur_c), "%lf", &depths[ii]); - depths[ii] = unitConversion(depths[ii],(char*)xmlGetProp(data->nodeTab[ii]->parent,(const xmlChar*)unitname.c_str()),(char*)"m"); - /*if (ii>0 && k>0) { - if (abs(depths[ii-1]-depths[ii]) != l) { - cout << "Warning: inconsistent " << name << " layers (depths and thicknesses do not match)." 
<< endl; - } - }*/ - } else if (name=="Thickness") { - //sscanf((const char*) xmlNodeGetContent(cur_c), "%lf", &l); - } - } - } - } - } - } - - //If necessary, reverse order - if (depths.size()>=2 && depths[0] &Layers, std::vector > depths, std::vector > val) -{ - double z = 0.; - for (size_t ii=0; ii0 && z>depths[0][ind]) - Layers[ii].tl += (val[0][ind]-val[0][ind-1])*(z-depths[0][ind])/(depths[0][ind]-depths[0][ind-1]); - - //Compute average density and hardness in the layer - for (size_t k=1; k<3; k++) { - ind = 0; - double zprev=z, cumsum=0, weights=0; - while (indz-Layers[ii].hl) { - ind++; - if (depths[k][ind]-z < 1e-12) { - if (depths[k][ind]<=z-Layers[ii].hl) { - cumsum += val[k][ind-1]*(zprev-(z-Layers[ii].hl)); - weights += (zprev-(z-Layers[ii].hl)); - } else { - cumsum += val[k][ind-1]*(zprev-depths[k][ind]); - weights += (zprev-depths[k][ind]); - zprev = depths[k][ind]; - } - } - } - if (k==1) { - Layers[ii].phiIce = (cumsum/weights)/Constants::density_ice; - } - } - } -} - -void CaaMLIO::setCustomLayerData(LayerData &Layer) { - const std::string xpath( "/caaml:stratProfile/caaml:Layer/caaml:customData/snp" ); - Layer.phiSoil = xmlSetVal(xpath,"phiSoil",0.); - Layer.hr = xmlSetVal(xpath,"SurfaceHoarMass",0.); - Layer.CDot = xmlSetVal(xpath,"StressRate",0.); - Layer.metamo = xmlSetVal(xpath,"Metamorphism",0.); -} - -//Set the deposition date of the layers based on their arrangement (if no data in the file) -void CaaMLIO::setDepositionDates(std::vector &Layers, const Date profileDate) -{ - for (size_t ii=0; iinodesetval->nodeNr) { - const std::string date_str( (char*) xmlNodeGetContent(xmlGetData(SnowData_xpath+"/caaml:stratProfile/caaml:Layer/caaml:customData/snp:DepositionDate")->nodeTab[0]) ); - Date date; - IOUtils::convertString(date, date_str, in_tz); - Layers[ii].depositionDate = date; - } else { - const unsigned int snowType = 
ElementData::snowType(Layers[ii].dd,Layers[ii].sp,Layers[ii].rg,Layers[ii].mk,Layers[ii].phiWater,ElementData::snowResidualWaterContent(Layers[ii].phiIce)); - const unsigned int a = (unsigned int) (snowType/100.); - if (ii==0) { - if (a==6) { - Layers[ii].depositionDate = profileDate; - } else if (a==0 || a==1) { - Layers[ii].depositionDate = profileDate-(Date)1./2440638.; - } else { - Layers[ii].depositionDate = profileDate-(Date)2./2440638.; - } - } else { - if ((a==0 || a==1) && (Layers[ii-1].depositionDate > profileDate-(Date)2./2440638.)) { - Layers[ii].depositionDate = profileDate-(Date)1./2440638.; - } else { - Layers[ii].depositionDate = profileDate-(Date)2./2440638.; - } - } - } - } -} - -/** - * @brief This routine writes the status of the snow cover at program termination and at specified backup times - * @param date current - * @param Xdata - * @param Zdata - * @param forbackup dump Xdata on the go - */ -void CaaMLIO::writeSnowCover(const Date& date, const SnowStation& Xdata, - const ZwischenData& Zdata, const bool& forbackup) -{ - std::string snofilename( getFilenamePrefix(Xdata.meta.getStationID().c_str(), o_snowpath) + ".caaml" ); - std::string hazfilename( getFilenamePrefix(Xdata.meta.getStationID().c_str(), o_snowpath) + ".haz" ); - - if (forbackup) { - stringstream ss; - ss << (int)(date.getJulian() + 0.5); //HACK - snofilename += ss.str(); - hazfilename += ss.str(); - } - - writeSnowFile(snofilename, date, Xdata, aggregate_caaml); - SmetIO::writeHazFile(hazfilename, date, Xdata, Zdata); -} - -void CaaMLIO::writeSnowFile(const std::string& snofilename, const Date& date, const SnowStation& Xdata, - const bool /*aggregate*/) -{ - xmlTextWriterPtr writer( xmlNewTextWriterFilename(snofilename.c_str(), 0) ); - xmlTextWriterSetIndent(writer,3); - xmlTextWriterStartDocument(writer, NULL, "UTF-8", NULL); - - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":SnowProfile").c_str()); - xmlTextWriterWriteAttribute(writer,(const 
xmlChar*)("xmlns:"+string((const char*)xml_ns_abrev_caaml)).c_str(),xml_ns_caaml); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)("xmlns:"+string((const char*)xml_ns_abrev_gml)).c_str(),xml_ns_gml); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)("xmlns:"+string((const char*)xml_ns_abrev_xsi)).c_str(),xml_ns_xsi); - // xmlTextWriterWriteAttribute(writer,(const xmlChar*)"xsi:schemaLocation",(const xmlChar*)(string((const char*)xml_ns_snp)+" "+xml_schemaLocation_snp).c_str()); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"xmlns:snp",(const xmlChar*)xml_ns_snp); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)("xmlns:"+string((const char*)xml_ns_abrev_slf)).c_str(),xml_ns_slf); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"gml:id",(const xmlChar*)("SLF_"+Xdata.meta.stationID).c_str()); - - //Required fields - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":metaDataProperty").c_str()); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":MetaData").c_str()); - time_t now; //HACK - time(&now); - struct tm *timeinfo = localtime(&now); - strftime(dateStr,30,"%FT%T.000+01:00",timeinfo); - writeDate(writer,":dateTimeReport",dateStr); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":srcRef").c_str()); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":Operation").c_str()); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"gml:id",(const xmlChar*)"OPERATION_ID"); - xmlWriteElement(writer,(namespaceCAAML+":name").c_str(),"SNOWPACK","",""); - xmlTextWriterEndElement(writer); - xmlTextWriterEndElement(writer); - xmlTextWriterEndElement(writer); - xmlTextWriterEndElement(writer); - - // Write profile date - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":validTime").c_str()); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":TimeInstant").c_str()); - const double tz = date.getTimeZone(); - 
sprintf(dateStr,"%s.000%+03d:%02d",date.toString(Date::ISO).c_str(),(int) tz,(int) (60*(tz-(int)tz))); //HACK: not (int)tz but floor(tz)! - writeDate(writer,":timePosition",dateStr); - xmlTextWriterEndElement(writer); - xmlTextWriterEndElement(writer); - - // Write stratigraphic profile - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":snowProfileResultsOf").c_str()); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":SnowProfileMeasurements").c_str()); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"dir",(const xmlChar*)"top down"); - - //Write custom snow/soil data - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":customData").c_str()); - writeCustomSnowSoil(writer,Xdata); - xmlTextWriterEndElement(writer); - - // Write profile depth - sprintf(valueStr,"%.4f",100.*Xdata.cH); - xmlWriteElement(writer,(namespaceCAAML+":profileDepth").c_str(),valueStr,"uom","cm"); - - //Write height of snow and Snow Water Equivalent (SWE) - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":hS").c_str()); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":Components").c_str()); - sprintf(valueStr,"%.4f",100.*(Xdata.cH - Xdata.Ground)); - xmlWriteElement(writer,(namespaceCAAML+":snowHeight").c_str(),valueStr,"uom","cm"); - sprintf(valueStr,"%.2f",Xdata.swe); - xmlWriteElement(writer,(namespaceCAAML+":swe").c_str(),valueStr,"uom","mm"); - xmlTextWriterEndElement(writer); - xmlTextWriterEndElement(writer); - - //Write layers and quantity profiles - writeLayers(writer,Xdata); - writeProfiles(writer,Xdata); - - xmlTextWriterEndElement(writer); - xmlTextWriterEndElement(writer); // end stratigraphic profile - - // Write station data - writeStationData(writer,Xdata); - - xmlTextWriterEndElement(writer); - - xmlTextWriterEndDocument(writer); - xmlFreeTextWriter(writer); -} - -void CaaMLIO::xmlWriteElement(const xmlTextWriterPtr writer, const char* name, const char* content, const char* att_name, 
const char* att_val) -{ - xmlTextWriterStartElement(writer, (const xmlChar*) name); - if (strcmp(att_name,"")) //ie: string not empty - xmlTextWriterWriteAttribute(writer, (const xmlChar*) att_name, (const xmlChar*) att_val); - xmlTextWriterWriteString(writer, (const xmlChar*) content); - xmlTextWriterEndElement(writer); -} - -// void CaaMLIO::writeDate(const xmlTextWriterPtr writer, const Date date) -void CaaMLIO::writeDate(const xmlTextWriterPtr writer, const char* att_name, const char* att_val) -{ - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+att_name).c_str()); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"xmlns:xs",(const xmlChar*)("http://www.w3.org/2001/XMLSchema")); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"xsi:type",(const xmlChar*)("xs:dateTime")); //,timeNow); - xmlTextWriterWriteString(writer, (const xmlChar*) att_val); - xmlTextWriterEndElement(writer); -} - -void CaaMLIO::writeCustomSnowSoil(const xmlTextWriterPtr writer, const SnowStation& Xdata) -{ - sprintf(valueStr,"%.4f",Xdata.Albedo); - xmlWriteElement(writer,(namespaceSNP+":Albedo").c_str(),valueStr,"",""); - sprintf(valueStr,"%.4f",Xdata.SoilAlb); - xmlWriteElement(writer,(namespaceSNP+":SoilAlb").c_str(),valueStr,"",""); - sprintf(valueStr,"%.4f",Xdata.BareSoil_z0); - xmlWriteElement(writer,(namespaceSNP+":BareSoil_z0").c_str(),valueStr,"uom","m"); - sprintf(valueStr,"%.4f",Xdata.Cdata.height); - xmlWriteElement(writer,(namespaceSNP+":CanopyHeight").c_str(),valueStr,"uom","m"); - sprintf(valueStr,"%.4f",Xdata.Cdata.lai); - xmlWriteElement(writer,(namespaceSNP+":CanopyLAI").c_str(),valueStr,"",""); - sprintf(valueStr,"%.4f",Xdata.Cdata.BasalArea); - xmlWriteElement(writer,(namespaceSNP+":CanopyBasalArea").c_str(),valueStr,"",""); - sprintf(valueStr,"%.4f",Xdata.Cdata.throughfall); - xmlWriteElement(writer,(namespaceSNP+":CanopyDirectThroughfall").c_str(),valueStr,"",""); - sprintf(valueStr,"%.4f",Xdata.WindScalingFactor); - 
xmlWriteElement(writer,(namespaceSNP+":WindScalingFactor").c_str(),valueStr,"",""); - sprintf(valueStr,"%d",static_cast(Xdata.ErosionLevel)); - xmlWriteElement(writer,(namespaceSNP+":ErosionLevel").c_str(),valueStr,"",""); - sprintf(valueStr,"%.4f",Xdata.TimeCountDeltaHS); - xmlWriteElement(writer,(namespaceSNP+":TimeCountDeltaHS").c_str(),valueStr,"",""); -} - -void CaaMLIO::writeLayers(const xmlTextWriterPtr writer, const SnowStation& Xdata) -{ - xmlTextWriterStartElement( writer,(const xmlChar*)(namespaceCAAML+":stratProfile").c_str() ); - if (!Xdata.Edata.empty()) { - for (size_t ii = Xdata.Edata.size(); ii-->0;) { - const bool snowLayer = (ii >= Xdata.SoilNode); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":Layer").c_str()); - - // Write custom layer data - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":customData").c_str()); - writeCustomLayerData(writer,Xdata.Edata[ii],Xdata.Ndata[ii]); - xmlTextWriterEndElement(writer); - - // Write snow and soil layer data - sprintf(layerDepthTopStr,"%.4f",100.*(Xdata.cH - Xdata.Ndata[ii+1].z)); - xmlWriteElement(writer,(namespaceCAAML+":depthTop").c_str(),layerDepthTopStr,"uom","cm"); - sprintf(layerThicknessStr,"%.4f",100.*Xdata.Edata[ii].L); - xmlWriteElement(writer,(namespaceCAAML+":thickness").c_str(),layerThicknessStr,"uom","cm"); - - if (snowLayer) { - //const unsigned int snowType = ElementData::snowType(Xdata.Edata[ii].dd, Xdata.Edata[ii].sp, Xdata.Edata[ii].rg, Xdata.Edata[ii].mk, Xdata.Edata[ii].theta[WATER], Xdata.Edata[ii].res_wat_cont); - const unsigned int snowType = Xdata.Edata[ii].getSnowType(); - const unsigned int a = snowType/100; - const unsigned int b = (snowType-100*a)/10; - const unsigned int c = snowType-100*a-10*b; - if (c != 2) { - xmlWriteElement(writer,(namespaceCAAML+":grainFormPrimary").c_str(),grainShape_valToAbbrev(a).c_str(),"",""); - } else { - xmlWriteElement(writer,(namespaceCAAML+":grainFormPrimary").c_str(),"MFcr","",""); - } - 
xmlWriteElement(writer,(namespaceCAAML+":grainFormSecondary").c_str(),grainShape_valToAbbrev(b).c_str(),"",""); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":grainSize").c_str()); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"uom",(const xmlChar*)"mm"); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":Components").c_str()); - sprintf(layerValStr,"%.3f",2.*Xdata.Edata[ii].rg); - xmlWriteElement(writer,(namespaceCAAML+":avg").c_str(),layerValStr,"",""); - xmlTextWriterEndElement(writer); - xmlTextWriterEndElement(writer); - } - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":validFormationTime").c_str()); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":TimeInstant").c_str()); - const double tz = Xdata.Edata[ii].depositionDate.getTimeZone(); - sprintf(dateStr,"%s.000%+03d:%02d",Xdata.Edata[ii].depositionDate.toString(Date::ISO).c_str(),(int) tz,(int) (60*(tz-(int)tz))); - writeDate(writer,":timePosition",dateStr); - xmlTextWriterEndElement(writer); - xmlTextWriterEndElement(writer); - if (snowLayer) { - xmlWriteElement(writer,(namespaceCAAML+":hardness").c_str(),hardness_valToCode(Xdata.Edata[ii].hard).c_str(),"uom",""); //HACK: check values... seem always the same! 
- } - xmlWriteElement(writer,(namespaceCAAML+":lwc").c_str(),lwc_valToCode(Xdata.Edata[ii].theta[WATER]).c_str(),"uom",""); - sprintf(layerValStr,"%.2f",Xdata.Edata[ii].Rho); - xmlWriteElement(writer,(namespaceCAAML+":density").c_str(),layerValStr,"uom","kgm-3"); - // snow properties only - if (snowLayer) { - sprintf(layerValStr,"%.2f",Xdata.Edata[ii].ogs); - xmlWriteElement(writer,(namespaceCAAML+":specSurfArea").c_str(),layerValStr,"uom","m2kg-1"); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":layerStrength").c_str()); - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":Components").c_str()); - sprintf(layerValStr,"%.2f",Xdata.Edata[ii].s_strength); - xmlWriteElement(writer,(namespaceCAAML+":strengthValue").c_str(),layerValStr,"uom","Nm-2"); - xmlTextWriterEndElement(writer); - xmlTextWriterEndElement(writer); - // sprintf(layerValStr,"%.2f",Xdata.Edata[ii].solute); - // xmlWriteElement(writer,(namespaceCAAML+":impurities").c_str(),layerValStr,"uom",""); - xmlTextWriterEndElement(writer); - } - } - } - xmlTextWriterEndElement(writer); -} - -void CaaMLIO::writeCustomLayerData(const xmlTextWriterPtr writer, const ElementData& Edata, const NodeData& Ndata) -{ - // const double tz = Edata.depositionDate.getTimeZone(); - // sprintf(dateStr,"%s:00.000%+03d:%02d",Edata.depositionDate.toString(Date::ISO).c_str(),(int) tz,(int) (60*(tz-(int)tz))); - // xmlWriteElement(writer,(namespaceSNP+":DepositionDate").c_str(),dateStr,"",""); - sprintf(valueStr,"%.4f",Edata.theta[SOIL]); - xmlWriteElement(writer,(namespaceSNP+":phiSoil").c_str(),valueStr,"",""); - sprintf(valueStr,"%.4f",Edata.soil[2]); - xmlWriteElement(writer,(namespaceSNP+":SoilRho").c_str(),valueStr,"uom","kgm-3"); - sprintf(valueStr,"%.4f",Edata.soil[0]); - xmlWriteElement(writer,(namespaceSNP+":SoilK").c_str(),valueStr,"uom","Wm-1s-1"); - sprintf(valueStr,"%.4f",Edata.soil[1]); - xmlWriteElement(writer,(namespaceSNP+":SoilC").c_str(),valueStr,"uom","Jkg-1"); - 
sprintf(valueStr,"%.4f",Edata.rb); - xmlWriteElement(writer,(namespaceSNP+":bondSize").c_str(),valueStr,"",""); - sprintf(valueStr,"%.2f",Edata.dd); - xmlWriteElement(writer,(namespaceSNP+":dendricity").c_str(),valueStr,"",""); - sprintf(valueStr,"%.2f",Edata.sp); - xmlWriteElement(writer,(namespaceSNP+":sphericity").c_str(),valueStr,"",""); - sprintf(valueStr,"%4lu",Edata.mk); - xmlWriteElement(writer,(namespaceSNP+":marker").c_str(),valueStr,"",""); - sprintf(valueStr,"%.4f",Ndata.hoar); - xmlWriteElement(writer,(namespaceSNP+":SurfaceHoarMass").c_str(),valueStr,"uom","kgm-2"); - xmlWriteElement(writer,(namespaceSNP+":ne").c_str(),"1","",""); - sprintf(valueStr,"%.4f",Edata.CDot); - xmlWriteElement(writer,(namespaceSNP+":StressRate").c_str(),valueStr,"uom","Nm-2s-1"); - sprintf(valueStr,"%.4f",Edata.metamo); - xmlWriteElement(writer,(namespaceSNP+":Metamorphism").c_str(),valueStr,"",""); -} - -void CaaMLIO::writeProfiles(const xmlTextWriterPtr writer, const SnowStation& Xdata) -{ - // temperature profile - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":tempProfile").c_str()); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"uomDepth",(const xmlChar*)"cm"); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"uomTemp",(const xmlChar*)"degC"); - if (!Xdata.Ndata.empty()) { - for (size_t ii = Xdata.Ndata.size(); ii-->0;) { - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":Obs").c_str()); - sprintf(layerDepthTopStr,"%.4f",100*(Xdata.cH - Xdata.Ndata[ii].z)); - xmlWriteElement(writer,(namespaceCAAML+":depth").c_str(),layerDepthTopStr,"",""); - sprintf(valueStr,"%.3f",unitConversion(Xdata.Ndata[ii].T,(char*)"degK",(char*)"degC")); - xmlWriteElement(writer,(namespaceCAAML+":snowTemp").c_str(),valueStr,"",""); - xmlTextWriterEndElement(writer); - } - } - xmlTextWriterEndElement(writer); //end temperature profile - - // density profile; not needed, erase later - xmlTextWriterStartElement(writer,(const 
xmlChar*)(namespaceCAAML+":densityProfile").c_str()); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"uomDepthTop",(const xmlChar*)"cm"); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"uomThickness",(const xmlChar*)"cm"); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"uomDensity",(const xmlChar*)"kgm-3"); - if (!Xdata.Edata.empty()) { - for (size_t ii = Xdata.Edata.size(); ii-->0;) { - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":Layer").c_str()); - sprintf(layerDepthTopStr,"%.4f",100*(Xdata.cH - Xdata.Ndata[ii+1].z)); - xmlWriteElement(writer,(namespaceCAAML+":depthTop").c_str(),layerDepthTopStr,"",""); - sprintf(layerThicknessStr,"%.4f",100*Xdata.Edata[ii].L); - xmlWriteElement(writer,(namespaceCAAML+":thickness").c_str(),layerThicknessStr,"",""); - sprintf(valueStr,"%.2f",Xdata.Edata[ii].Rho); - xmlWriteElement(writer,(namespaceCAAML+":density").c_str(),valueStr,"",""); - xmlTextWriterEndElement(writer); - } - } - xmlTextWriterEndElement(writer); //end densityProfile -} - -void CaaMLIO::writeStationData(const xmlTextWriterPtr writer, const SnowStation& Xdata) -{ - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":locRef").c_str()); //start locRef - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":ObsPoint").c_str()); //start ObsPoint - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"gml:id",(const xmlChar*)("SLF_"+Xdata.meta.stationID+"_1").c_str()); - xmlWriteElement(writer,(namespaceCAAML+":name").c_str(),(const char*) Xdata.meta.stationName.c_str(),"",""); - xmlWriteElement(writer,(namespaceCAAML+":obsPointSubType").c_str(),"","",""); - - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":validElevation").c_str()); //start validElevation - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":ElevationPosition").c_str()); //start ElevationPosition - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"uom",(const xmlChar*)"m"); - char elevStr[5]; - 
sprintf(elevStr,"%.0f",Xdata.meta.position.getAltitude()); - xmlWriteElement(writer,(namespaceCAAML+":position").c_str(),elevStr,"",""); - xmlTextWriterEndElement(writer); //end ElevationPosition - xmlTextWriterEndElement(writer); //end validElevation - - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":validAspect").c_str()); //start validAspect - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":AspectPosition").c_str()); //start AspectPosition - xmlWriteElement(writer,(namespaceCAAML+":position").c_str(),IOUtils::bearing(Xdata.meta.getAzimuth()).c_str(),"",""); - xmlTextWriterEndElement(writer); //end AspectPosition - xmlTextWriterEndElement(writer); //end validAspect - - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":validSlopeAngle").c_str()); //start validSlopeAngle - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":SlopeAnglePosition").c_str()); //start SlopeAnglePosition - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"uom",(const xmlChar*)"deg"); - char slopStr[5]; - sprintf(slopStr,"%.0f",Xdata.meta.getSlopeAngle()); - xmlWriteElement(writer,(namespaceCAAML+":position").c_str(),slopStr,"",""); - xmlTextWriterEndElement(writer); //end SlopeAnglePosition - xmlTextWriterEndElement(writer); //end validSlopeAngle - - xmlTextWriterStartElement(writer,(const xmlChar*)(namespaceCAAML+":pointLocation").c_str()); //start pointLocation - xmlTextWriterStartElement(writer,(const xmlChar*)"gml:Point"); //start gml:Point - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"gml:id",(const xmlChar*)("SLF_"+Xdata.meta.stationID+"_2").c_str()); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"srsName",(const xmlChar*)"urn:ogc:def:crs:OGC:1.3:CRS84"); - xmlTextWriterWriteAttribute(writer,(const xmlChar*)"srsDimension",(const xmlChar*)"2"); - char posStr[30]; - sprintf(posStr,"%f %f",Xdata.meta.position.getLat(),Xdata.meta.position.getLon()); - xmlWriteElement(writer,"gml:pos",posStr,"",""); - 
xmlTextWriterEndElement(writer); //end gml:Point - xmlTextWriterEndElement(writer); //end pointLocation - - xmlTextWriterEndElement(writer); //end ObsPoint - xmlTextWriterEndElement(writer); //end locRef -} - -void CaaMLIO::writeTimeSeries(const SnowStation& /*Xdata*/, const SurfaceFluxes& /*Sdata*/, const CurrentMeteo& /*Mdata*/, - const ProcessDat& /*Hdata*/, const double /*wind_trans24*/) -{ - throw IOException("Nothing implemented here!", AT); -} - -void CaaMLIO::writeProfile(const Date& /*date*/, const SnowStation& /*Xdata*/) -{ - throw IOException("Nothing implemented here!", AT); -} - -bool CaaMLIO::writeHazardData(const std::string& /*stationID*/, const std::vector& /*Hdata*/, - const std::vector& /*Hdata_ind*/, const size_t& /*num*/) -{ - throw IOException("Nothing implemented here!", AT); -} - -/** - * @brief Convert from liquid water content code to value - * @author Adrien Gaudard - * @param code Liquid water content code (one character) - * return Liquid water content value (fraction) - */ -double CaaMLIO::lwc_codeToVal(const char* code) -{ - if (!strcmp(code,"D")) return 0.; - if (!strcmp(code,"M")) return 0.01; - if (!strcmp(code,"W")) return 0.03; - if (!strcmp(code,"V")) return 0.08; - if (!strcmp(code,"S")) return 0.15; - - throw IOException("Unrecognized liquid water content code.", AT); -} - -/** - * @brief Convert from liquid water content value to code - * @author Adrien Gaudard - * @param val Liquid water content value (fraction) - * return Liquid water content code (one character) - */ -std::string CaaMLIO::lwc_valToCode(const double val) -{ - if (val == 0.00) return "D"; - if (val < 0.03) return "M"; - if (val < 0.08) return "W"; - if (val < 0.15) return "V"; - if (val < 1.00) return "S"; - - throw IOException("Invalid liquid water content value.", AT); -} - -/** - * @brief Convert from hardness code to value - * @author Adrien Gaudard - * @param code Hardness code - * return Hardness value (1 to 6) - */ -double 
CaaMLIO::hardness_codeToVal(char* code) -{ - double val = 0.; - unsigned int n = 0; - char* c[2]; - c[0] = strtok(code,"-"); - c[1] = strtok(NULL,"-"); - - for (size_t i=0; i<2; i++) { - if (c[i]) { - n++; - if (!strcmp(c[i],"F")) { - val += 1.; - } else if (!strcmp(c[i],"4F")) { - val += 2.; - } else if (!strcmp(c[i],"1F")) { - val += 3.; - } else if (!strcmp(c[i],"P")) { - val += 4.; - } else if (!strcmp(c[i],"K")) { - val += 5.; - } else if (!strcmp(c[i],"I")) { - val += 6.; - } else { - throw IOException("Unrecognized hardness code.", AT); - } - } - } - return val/n; -} - -/** - * @brief Convert from hardness value to code - * @author Adrien Gaudard - * @param val Hardness value (1 to 6) - * return Hardness code - */ -std::string CaaMLIO::hardness_valToCode(const double val) -{ - if (val == 1.0) return "F"; - if (val == 1.5) return "F-4F"; - if (val == 2.0) return "4F"; - if (val == 2.5) return "4F-1F"; - if (val == 3.0) return "1F"; - if (val == 3.5) return "1F-P"; - if (val == 4.0) return "P"; - if (val == 4.5) return "P-K"; - if (val == 5.0) return "K"; - if (val == 5.5) return "K-I"; - if (val == 6.0) return "I"; - - throw IOException("Unrecognized hardness value.", AT); -} - -/** - * @brief Convert from grain shape code to values (sphericity, dendricity, marker) - * @author Adrien Gaudard - * @param[in] code Grain shape code - * @param[out] sp sphericity - * @param[out] dd dendricity - * @param[out] mk micro-structure marker - */ -void CaaMLIO::grainShape_codeToVal(const std::string& code, double &sp, double &dd, unsigned short int &mk) -{ - if (code=="PP") { - sp = 0.5; dd = 1.; mk = 0; - } else if (code=="DF") { - sp = 0.5; dd = 0.5; mk = 0; - } else if (code=="RG") { - sp = 1.; dd = 0.; mk = 2; - } else if (code=="FC") { - sp = 0.; dd = 0.; mk = 1; - } else if (code=="DH") { - sp = 0.; dd = 0.; mk = 1; - } else if (code=="SH") { - sp = 0.; dd = 0.; mk = 1; - } else if (code=="MF") { - sp = 1.; dd = 0.; mk = 2; - } else if (code=="IF") { - sp = 1.; dd = 
0.; mk = 2; - } else { - throw IOException("Unrecognized grain shape code.", AT); - } -} - -/** - * @brief Convert from grain shape value to code - * @author Adrien Gaudard - * @param var Grain shape value - * return Grain shape code - */ -std::string CaaMLIO::grainShape_valToAbbrev(const unsigned int var) -{ - if (var == 0) return "PPgp"; - if (var == 1) return "PP"; - if (var == 2) return "DF"; - if (var == 3) return "RG"; - if (var == 4) return "FC"; - if (var == 5) return "DH"; - if (var == 6) return "SH"; - if (var == 7) return "MF"; - if (var == 8) return "IF"; - if (var == 9) return "FCxr"; - - throw IOException("Unrecognized grain shape code.", AT); -} - -/** - * @brief Convert from grain shape values (sphericity, dendricity, marker) to two-character code - * @author Adrien Gaudard - * @param var Grain shape values (sphericity, dendricity, marker) (AMBIGUOUS) - * return Grain shape two-character code (NOT ALL REPRESENTED) - */ -std::string CaaMLIO::grainShape_valToAbbrev_old(const double* var) -{ - const double sp = ((int)(var[0]*10+0.5))/10.; - const double dd = ((int)(var[1]*10+0.5))/10.; - const double mk = ((int)(var[2]*10+0.5))/10.; - - if (sp == 0.5 && dd == 1. && mk == 0.) return "PP"; - if (sp == 0.5 && dd == 0.5 && mk == 0.) return "DF"; - if (sp == 1. && dd == 0. && (mk == 2. || mk == 12.)) return "RG"; - if (sp == 0. && dd == 0. && mk == 1.) return "FC"; - if (sp == 0. && dd == 0. && mk == 1.) return "DH"; - if (sp == 0. && dd == 0. && mk == 1.) return "SH"; - if (sp == 1. && dd == 0. && mk == 2.) return "MF"; - if (sp == 1. && dd == 0. && mk == 2.) return "IF"; - - throw IOException("Unrecognized set of grain shape values.", AT); -} +/***********************************************************************************/ +/* Copyright 2014 WSL Institute for Snow and Avalanche Research SLF-DAVOS */ +/***********************************************************************************/ +/* This file is part of Snowpack. 
+ MeteoIO is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + MeteoIO is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with MeteoIO. If not, see . +*/ +#include "CaaMLIO.h" +#include "../Utils.h" +//#include + +#include +#include +#include + +#include + +using namespace std; +using namespace mio; + +/** + * @page caaml CAAML + * @section caaml_format Format + * This plugin reads the CAAML files as generated according CAAML V6.0.3's + * specification. In order to validate + * a CAAML file, download the xsd + * file and use either an online XML validator or an offline tool (such as Notepad++'s XML tool). + * + * @section caaml_keywords Keywords + * This plugin uses the following keywords (all specified in the [Input] section): + * - COORDSYS: input coordinate system (see Coords) + * - SNOW: specify CAAML to read in a caaml file + * - SNOWPATH: string containing the path to the caaml files to be read + * - SNOWFILE: specify the caaml file to read the data from + * - XML_ENCODING: force the input file encoding, overriding the file's own encoding declaration (optional, see \ref caaml_encoding "XML encoding" below) + * - CAAML_MAX_ELEMENT_THICKNESS: if set (and non-zero), the thickness of the elements will be set to this value, otherwise each element will correspond to + * one stratigraphic layer. Recommendation: set this value to 0.002 (= 2 mm) + * - CAAML_WRITEOUT_AS_READIN: if set to true, a caaml will be written just after reading in, to check if the reading of the caaml was correct. 
+ * + * @section caaml_reading Reading a caaml-file + * Data which is important for a snowpack-simulation but usually not given in a caaml-file (like dendricity for example), can be included in a caaml-file + * as snowpack-custom-data with the prefix "snp". A caaml-file written out by this plugin will contain this data. However, if this data is not available, + * the corresponding values will be estimated or set to default values: + * - snowpack-custom-data for the whole snowpack (like Albedo, WindScalingFactor,...) will be set to default values. + * - layer-custom-data (like dendricity, sphericity, maker,...) will be estimated from primary grain form, grain size and temperature. + * - the formation time of a layer will be estimated depending on snow type and layer location. + * + * The liquid water content (lwc) of a layer will be read in from the lwc-profile. If there is no lwc-profile in the caaml-file, the lwc will be estimated + * from the wetness-codes (D, M, W,...) given in the stratigraphic profile. + * + * Besides the lwc-profile the density- and temperature-profile are read in from the caaml-file. The lwc-profile is optional, but the temperature- and + * density-profile have to be given in the caaml-file. + * + * Consistency checks + * + * When reading in a caaml-file, the data will be checked for consistency. If there is an inconsistency a warning will be printed and values will be adjusted, + * if possible. Otherwise an exception is thrown. + * + * Warnings: + * - total snow height given in is different from the sum of the layer-thicknesses. The snow height given in will be + * ignored. + * - grain size of a surface-hoar-layer differs from the surface-hoar-layer-thickness. Adjustment: Grain size will be set to layer thickness. + * - temperature of a layer is above 0°C. Adjustment: Set temperature to 0°C. + * - liquid water content (lwc) is greater than 0 and temperature is below 0°C. Adjustment: Set lwc to 0. 
+ * - grain form is "FC" and grain size is above 1.6 mm. Adjustment: nothing. + * - grain form is "DH" and grain size is below 1.4 mm. Adjustment: nothing. + * + * Exceptions: + * - grain size is 0 and grain form is not "IF" + * - slope angle is > 0° and no azimuth is given. (If the slope angle is not given, the slope angle is set to 0°.) + * - missing data / wrong camml-version ( != 6.0.3) + * - wrong syntax in caaml-file (pointy brackets, matching quotes,...) + * + * @subsection caaml_encoding XML encoding + * Each XML document should specify its encoding. However this information might sometimes be missing or even worse, be false. This makes the XML document non-compliant. + * Normally, CAAML reads the file encoding in the file itself. If this does not work (one of the two cases given above), it is possible to force the + * encoding of the input file by using the "XML_ENCODING" option. This option takes one of the following values + * ("LE" stands for "Little Endian" and "BE" for "Big Endian"): + * UTF-8, UTF-16, UTF-16-LE, UTF-16-BE, UTF-32, UTF-32-LE, UTF-32-BE, LATIN1, ISO-8859-1, WCHAR + * + * @section caaml_writing Writing a caaml-file + * This is an example of a caaml-file written with this plugin: + * @code + * + * + * + * + * + * 2008-05-16T09:00:00+02:00 + * + * + * 2018-12-18T17:38:41+01:00 + * + * + * + * SNOWPACK v3.45 compiled on Dec 10 2018, 15:45:05. user: theile + * + * + *... + * @endcode + * Furthermore some data about the profile location and following profiles will be written: + * - stratigraphic profile + * - temperature profile + * - density profile + * - lwc profile + * - specific surface area profile + * - strength profile + * + * @section caaml_example Example + * @code + * [Input] + * COORDSYS = CH1903 + * SNOW = CAAML + * SNOWPATH = ./input/snowCAAMLdata + * SNOWFILE = 5WJ_20120229.caaml + * CAAML_MAX_ELEMENT_THICKNESS = 0.002 + * @endcode + * + */ + +/** + * @class DataResampler + * @brief Resample data. 
+ * The data (double) is given as a x-vector xVec and y-Vector yVec. The new sampling can be defined by an interval dx or by an arbitrary + * x-vector xVecGoal. Two methods are implemented one for point data and one for range data. + * For point data the new values are interpolated and extrapolated. + * For range data the new range-values are averaged over the old ranges. + * The size of a range (=thickness of a layer) is defined by the difference: xVec[i+1]-xVec[i] and for the last range: xMax-xVec[i] + * Assumptions: + * - xVec[i] < xVec[i+1] + * - xVecGoal[i] < xVecGoal[i+1] + * + * Additional assumptions for the range data: + * - xVec[0] >= 0 + * - xVecGoal[0] >= 0 + * - the last value of xVec and xVecGoal has to be smaller (not equal!) than xMax + * + * Examples: + * Point-data example with rotation: + * Input: xVec:{10, 50, 184} yVec:{273.15, 263.15, 253.15} xVecGoal:{0, 84} xMax=184 + * Rotation result: xVec:{0, 134, 174} yVec:{253.15, 263.15, 273.15} + * Final (resampled) result: xVec:{0,84} yVec:{253.15, 259.42} + * + * Range-data example with rotation: + * Input: xVec:{0, 10, 50} yVec:{300,400,400} xVecGoal:{0,84} xMax=184 + * Rotation result: xVec:{0, 134, 174} yVec:{400,400,300} + * Final (resampled) result: xVec:{0,84} yVec:{400,390} + * + * @author Thiemo Theile + * @date 2018 + */ +class DataResampler { + public: + DataResampler(const std::vector& xVecIn, const std::vector& yVecIn, const double dxIn, const double xMaxIn, + const bool isRangeMeasurement, const bool changeDirectionIn); + DataResampler(const std::vector& xVecIn, const std::vector& yVecIn, const std::vector& xVecGoalIn, + const double xMaxIn, const bool isRangeMeasurement, const bool changeDirectionIn); + std::vector getResampledXVec(); + std::vector getResampledYVec(); + void printProfile(const std::string message); + bool resample(); + + private: + bool checkIfDataIsValid(); + void resamplePointValues(); + void resampleRangeValues(); + void createXVecGoalFromDx(const double dx); + void 
changeDirectionOfXAxis(); + + std::vector xVec; + std::vector yVec; + std::vector xVecGoal; + const double xMax; + const bool useMethodForRangeValues; //if true use rangeValueMethod, otherwise use pointValueMethod + const bool changeDirection; //if true change the direction of the x-Axis before resampling +}; + +DataResampler::DataResampler(const std::vector& xVecIn, const std::vector& yVecIn, const double dxIn, const double xMaxIn, + const bool isRangeMeasurement, const bool changeDirectionIn) + : xVec(xVecIn),yVec(yVecIn),xVecGoal(),xMax(xMaxIn),useMethodForRangeValues(isRangeMeasurement), + changeDirection(changeDirectionIn) +{ + createXVecGoalFromDx(dxIn); +} + +DataResampler::DataResampler(const std::vector& xVecIn, const std::vector& yVecIn, const std::vector& xVecGoalIn, + double xMaxIn, const bool isRangeMeasurement, const bool changeDirectionIn) + : xVec(xVecIn),yVec(yVecIn),xVecGoal(xVecGoalIn),xMax(xMaxIn), + useMethodForRangeValues(isRangeMeasurement), changeDirection(changeDirectionIn) +{ +} + +void DataResampler::printProfile(const std::string message="profile:") +{ + std::cout << message << std::endl; + std::cout << "Depth: Value: " << std::endl; + if(xVec.size() == yVec.size()){ + for (size_t ii=0;ii DataResampler::getResampledXVec() +{ + return xVec; +} + +std::vector DataResampler::getResampledYVec() +{ + return yVec; +} + +bool DataResampler::checkIfDataIsValid() +{ + //check if the assumptions for the data are fulfilled + if(xVec.size() != yVec.size()){ + return false; + } + if(xVec.size() < 1){ + return false; + } + if(xVecGoal.size() <1){ + return false; + } + for(size_t ii=0; ii= xVec[ii+1]){ + return false; + } + } + for(size_t ii=0; ii= xVecGoal[ii+1]){ + return false; + } + } + if(useMethodForRangeValues){ + if(xVecGoal[xVecGoal.size()-1] >= xMax){ + return false; + } + if(xVec[xVec.size()-1] >= xMax){ + return false; + } + if(xVecGoal[0] < 0){ + return false; + } + if(xVec[0] < 0){ + return false; + } + } + return true; +} + +bool 
DataResampler::resample() +{ + const bool dataIsValid = checkIfDataIsValid(); + if (dataIsValid==false) return false; + + //printProfile("original profile"); + if( changeDirection ){ + changeDirectionOfXAxis(); + //printProfile("rotated profile"); + } + if(useMethodForRangeValues){ + resampleRangeValues(); + }else{ + resamplePointValues(); + } + //printProfile("resampled profile"); + return true; +} + +void DataResampler::resamplePointValues() +{ + std::vector xVecResampled; + std::vector yVecResampled; + size_t index=1; + for (size_t indexGoal=0; indexGoal < xVecGoal.size(); indexGoal++){ + const double x=xVecGoal[indexGoal]; + //1. search for index so that xVec[index-1] y = 20 +(10-20)/(6-4)*(5-4) = 15 + const double y=yVec[index-1]+(yVec[index]-yVec[index-1])/(xVec[index]-xVec[index-1])*(x-xVec[index-1]); + xVecResampled.push_back(x); + yVecResampled.push_back(y); + } + xVec=xVecResampled; + yVec=yVecResampled; +} + +void DataResampler::resampleRangeValues() +{ + //append some values to the vectors for correct treatment of the last value: + xVec.push_back(xMax); + yVec.push_back(yVec[yVec.size()-1]); + xVecGoal.push_back(xMax); + std::vector xVecResampled; + std::vector yVecResampled; + + size_t index=1; + for (size_t indexGoal=0; indexGoal < xVecGoal.size()-1; indexGoal++){ + const double x=xVecGoal[indexGoal]; + const double dx=xVecGoal[indexGoal+1]-x; + //1. 
search for index so that xx+dx){ + xii=x+dx; + } + double weight = (xii-xiiPrevious)/dx; + xiiPrevious=xii; + y=y+weight*yi; + } + xVecResampled.push_back(x); + yVecResampled.push_back(y); + } + xVec=xVecResampled; + yVec=yVecResampled; +} + +/** + * @brief xVecGoal with a regular sampling (dx) is created + * Example: for dx=50, xMax=184: xVecGoal={0,50,100,150} + */ +void DataResampler::createXVecGoalFromDx(const double dx) +{ + xVecGoal.clear(); + double x=0; + while(x < xMax){ + xVecGoal.push_back(x); + x=x+dx; + } +} + +/** + * @brief The direction of the x-axis is changed and the origin is shifted to xMax. + * Example: xVec={0,40,90} xMax=100 is changed to: + * {10,60,100} for point-data + * {0,10,60} for range-data + */ +void DataResampler::changeDirectionOfXAxis() +{ + std::vector dxVec; + std::vector xVecTemp; + std::vector yVecTemp; + + //calculate thicknesses of the layers: + for (size_t ii=0; ii0) { + const bool directionTopDown = getLayersDir(); //Read profile direction + //Read quantity profiles (density, temperature and lwc) + //temperature data is needed. if not available an exception is thrown: + getAndSetProfile("/caaml:tempProfile/caaml:Obs","caaml:snowTemp",directionTopDown,false,SSdata.Ldata); + //density data is needed. if not available an exception is thrown: + getAndSetProfile("/caaml:densityProfile/caaml:Layer","caaml:density",directionTopDown,true,SSdata.Ldata); + //lwc data is optional. 
if not available, no problem: + getAndSetProfile("/caaml:lwcProfile/caaml:Layer","caaml:lwc",directionTopDown,true,SSdata.Ldata); + + adjustToSlopeAngle(SSdata); // #cmb + + checkAllDataForConsistencyAndSetMissingValues(SSdata); + } + //checkWhatWasReadIn(SSdata); + return true; +} + + +/** + * @brief This routine adjust the snow pack thickness/height to the slope angle (internal snow profile is always vertical to ground) + * @param SSdata data structure including all important station parameters as well as LayerData + * #cmb (needs to be called before checkAllDataForConsistency...) + */ +void CaaMLIO::adjustToSlopeAngle(SN_SNOWSOIL_DATA& SSdata) +{ + const double cos_sl = cos(SSdata.meta.getSlopeAngle()*mio::Cst::to_rad); + SSdata.Height = SSdata.Height * cos_sl; + SSdata.HS_last = SSdata.HS_last * cos_sl; + for (size_t ii=0; ii in the caaml-file + * @return data of one layer which was read in from the caaml-file + */ +LayerData CaaMLIO::xmlGetLayer(pugi::xml_node nodeLayer, std::string& grainFormCode) +{ + grainFormCode=""; + LayerData Layer; + for (pugi::xml_node node = nodeLayer.first_child(); node; node = node.next_sibling()){ + const std::string fieldName( node.name() ); + if(fieldName == "caaml:grainSize"){ + const std::string unitMeasured( node.attribute("uom").as_string() ); + if (xmlReadValueFromNode(node.child("caaml:Components").child("caaml:avg"),"caaml:avg",Layer.rg,"mm",unitMeasured,0.5)){ + Layer.rb = Layer.rg/4.; //this value will be replaced if there is this value in the customData... + }else if(xmlReadValueFromNode(node.child("caaml:Components").child("caaml:avgMax"),"caaml:avgMax",Layer.rg,"mm",unitMeasured,0.5)){ + Layer.rb = Layer.rg/4.; //this value will be replaced if there is this value in the customData... 
+ } + } + if (xmlReadValueFromNode(node,"caaml:thickness",Layer.hl,"m")){ + if(i_max_element_thickness != IOUtils::nodata){ + Layer.ne = (size_t) ceil(Layer.hl/i_max_element_thickness); + }else{ + Layer.ne=1; + } + } + if (fieldName == "caaml:wetness"){ //this value will be replaced if a lwc-profile is in the caaml-file!!! + Layer.phiWater = lwc_codeToVal((char*) node.child_value()); + } + if (fieldName == "caaml:hardness"){ + //const double hardness = hardness_codeToVal((char*) node.child_value()); + //std::cout << "hardness (we have no member (of Layer) we could set this value to!!!): " << hardness << " " << std::endl; + } + if (fieldName == "caaml:grainFormPrimary"){ + grainFormCode = std::string( (char*)node.child_value() ); + grainShape_codeToVal(grainFormCode, Layer.sp, Layer.dd, Layer.mk); // these values will be replaced if there are values in the custom-snowpack-data + } + if (fieldName == "caaml:validFormationTime"){ + const std::string date_str( (char*) node.child("caaml:TimeInstant").child("caaml:timePosition").child_value() ); + Date date; + IOUtils::convertString(date, date_str, in_tz); + Layer.depositionDate = date; + } + } + + //read custom-snowpack-data. 
do this at the end to be sure to use these values (if available) + for (pugi::xml_node node = nodeLayer.child("caaml:metaData").child("caaml:customData").first_child(); node; node = node.next_sibling()) + { + xmlReadValueFromNode(node,"snp:dendricity",Layer.dd); + xmlReadValueFromNode(node,"snp:sphericity",Layer.sp); + xmlReadValueFromNode(node,"snp:bondSize",Layer.rb); + xmlReadValueFromNode(node,"snp:phiSoil",Layer.phiSoil); + xmlReadValueFromNode(node,"snp:SurfaceHoarMass",Layer.hr); + xmlReadValueFromNode(node,"snp:StressRate",Layer.CDot); + xmlReadValueFromNode(node,"snp:Metamorphism",Layer.metamo); + const std::string fieldName( node.name() ); + if (fieldName == "snp:marker") { + sscanf((const char*) node.child_value(),"%hu",&Layer.mk); + } + } + return Layer; +} + +/** + * @brief Read the stratigraphic layers from the caaml-file to SSdata + * @param SSdata + */ +void CaaMLIO::xmlReadLayerData(SN_SNOWSOIL_DATA& SSdata) +{ + const bool directionTopDown = getLayersDir(); //Read profile direction + + std::string path = SnowData_xpath+"/caaml:snowPackCond/caaml:hS/caaml:Components/caaml:height"; + const pugi::xml_node nodeHS = inDoc.first_element_by_path( (char*) path.c_str() ); + SSdata.HS_last=IOUtils::nodata; + xmlReadValueFromNode(nodeHS,"caaml:height",SSdata.HS_last,"m"); + + size_t nLayers=0; + path = SnowData_xpath+"/caaml:stratProfile/caaml:Layer"; + const pugi::xml_node nodeFirstLayer = inDoc.first_element_by_path( (char*) path.c_str() ); + if(nodeFirstLayer.empty()){ + throw NoDataException("Layer data not found in caaml-file. 
Expected path: '"+path+"'", AT); + } + pugi::xml_node nodeLastLayer; + for (pugi::xml_node node = nodeFirstLayer; node; node = node.next_sibling("caaml:Layer")){ + nLayers++; + nodeLastLayer=node; + } + SSdata.nLayers=nLayers; + SSdata.Ldata.resize(SSdata.nLayers, LayerData()); + grainForms.clear(); + std::string grainForm=""; + if (SSdata.nLayers>0) { + if (!directionTopDown) { + size_t ii=0; + for (pugi::xml_node node = nodeFirstLayer; node; node = node.next_sibling("caaml:Layer")){ + SSdata.Ldata[ii] = xmlGetLayer(node,grainForm); + grainForms.push_back(grainForm); + ii++; + } + }else{ + size_t ii=0; + for (pugi::xml_node node = nodeLastLayer; node; node = node.previous_sibling("caaml:Layer")){ + SSdata.Ldata[ii] = xmlGetLayer(node,grainForm); + grainForms.push_back(grainForm); + ii++; + } + } + //Estimate depostion dates (in case it was not in the caaml-file): + estimateValidFormationTimesIfNotSetYet(SSdata.Ldata,SSdata.profileDate); + } +} + +/** + * @brief Read profile data (density, temperature or lwc) from the caaml-file. + * The density- and temperature-profile have to be in the caaml-file. Otherwise an + * exception is thrown. The LWC-profile is optional and ignored if not available. + * @param path path to the profile data. e.g. "/caaml:densityProfile/caaml:Layer" + * @param name name of the xml-element to read. e.g. "caaml:snowTemp" + * @param zVec output: depth-values in meter + * @param valVec output: profile-values (valVec has the same length as zVec) + * @return true if the profile was read in successfully + */ +bool CaaMLIO::xmlGetProfile(const std::string path, const std::string name, std::vector& zVec, std::vector& valVec) +{ + //check if data exists: + if( !xmlDoesPathExist(SnowData_xpath+path)){ + if(name == "caaml:lwc"){ + //std::cout << "No lwc-profile in the caaml-file. lwc will be estimated from the wetness in the stratigraphic profile. " << std::endl; + return false; + }else{ + throw NoDataException("Invalid path for: '"+name+"'-profile. 
path: '"+path+"'", AT); + } + } + + std::string profilePath = SnowData_xpath+path; + const pugi::xml_node nodeFirstProfile = inDoc.first_element_by_path( (char*) profilePath.c_str() ); + zVec.resize(0); + valVec.resize(0); + for (pugi::xml_node nodeProfile = nodeFirstProfile; nodeProfile; nodeProfile = nodeProfile.next_sibling()){ + for (pugi::xml_node node = nodeProfile.first_child(); node; node = node.next_sibling()){ + const std::string fieldName( node.name() ); + if (fieldName == name){ + double val=0; + sscanf((const char*) node.child_value(), "%lf", &val); + if (name=="caaml:snowTemp"){ + val = unitConversion(val,(char*)node.attribute("uom").as_string(),(char*)"K"); + } + valVec.push_back(val); + } + if (fieldName == "caaml:depth" || fieldName == "caaml:depthTop") { + double z=0; + sscanf((const char*) node.child_value(), "%lf", &z); + z = unitConversion(z,(char*)node.attribute("uom").as_string(),(char*)"m"); + zVec.push_back(z); + } + } + } + return true; +} + +/** + * @brief Read profile data (density, temperature or lwc) from the caaml-file, rotate the direction + * of the data to "top up" and resample the data to the sampling of the stratigraphic data. + * @param path path to the profile data. e.g. "/caaml:densityProfile/caaml:Layer" + * @param name name of the xml-element to read. e.g. 
"caaml:snowTemp" + * @param directionTopDown true if the direction is top down + * @param isRangeMeasurement true if the data is a range measurement (like density or lwc), + * false if the data is a point measurement (like temperature) + * @param Layers output + */ +void CaaMLIO::getAndSetProfile(const std::string path, const std::string name, + const bool directionTopDown, const bool isRangeMeasurement,std::vector& Layers) +{ + std::vector zVec; + std::vector valVec; + //read the data from the Caaml-file: + bool profileExists = xmlGetProfile(path, name, zVec, valVec); + if (profileExists){ + //direction has to be bottomUp: + bool changeDirection=false; + if (directionTopDown) { + changeDirection=true; + } + //create a vector of the z-positions from the layerData. the profile will be resampled to these layers. + std::vector zVecLayers; + double z=0.; + for (size_t ii=0; ii &Layers, const Date profileDate) +{ + for (size_t ii=0; ii profileDate-(Date)2./2440638.)) { + Layers[ii].depositionDate = profileDate-(Date)1./2440638.; + } else { + Layers[ii].depositionDate = profileDate-(Date)2./2440638.; + } + } + } + } +} + +/** + * @brief Check station- and layer-data for consistency. If possible set reasonable values, otherwise throw exceptions or write warnings. + * Furthermore determine some missing values (total number of elements (nodes), height and phiVoids). + * @param SSdata + */ +void CaaMLIO::checkAllDataForConsistencyAndSetMissingValues( SN_SNOWSOIL_DATA& SSdata ) +{ + //check station data: + double azimuth = SSdata.meta.getAzimuth(); + double slopeAngle = SSdata.meta.getSlopeAngle(); + if(slopeAngle ==IOUtils::nodata){ + slopeAngle=0; + } + if(azimuth==IOUtils::nodata){ + if(slopeAngle==0){ + azimuth=0; + } + if(slopeAngle > 0){ + throw NoDataException("No data found for 'caaml:validAspect'. If the slope-angle is >0 degree, we also need the azimuth. 
", AT); + } + } + SSdata.meta.setSlope(slopeAngle,azimuth); + + //check layer data: + for (size_t ii = 0; ii < SSdata.nLayers; ii++) { + std::string grainFormCode = grainForms[ii]; + if (grainFormCode=="MF"){ + if (SSdata.Ldata[ii].tl < Constants::meltfreeze_tk-0.05){ + SSdata.Ldata[ii].mk = (unsigned short int) (SSdata.Ldata[ii].mk + 10); + } + } + if (grainFormCode=="SH" && ii==SSdata.nLayers-1){ //set parameters for surface hoar (only at the surface, not buried) + SSdata.Ldata[ii].phiWater=0; + SSdata.Ldata[ii].phiIce = hoarDensitySurf/Constants::density_ice; + double grainRadius = M_TO_MM(SSdata.Ldata[ii].hl/2.0); + if(grainRadius != SSdata.Ldata[ii].rg){ + std::cout << "WARNING! Inconsistent input data in caaml-file: Grain size for surface-hoar-layer should be about the same value as the " + << "surface-hoar-layer-thickness (" << grainRadius*2 << " mm). Adjusting grain size from: " + << SSdata.Ldata[ii].rg*2 << " mm to " << grainRadius*2 << " mm." << std::endl; + SSdata.Ldata[ii].rg = grainRadius; + } + SSdata.Ldata[ii].rb = SSdata.Ldata[ii].rg/3.; + SSdata.Ldata[ii].dd = 0.; + SSdata.Ldata[ii].sp = 0.; + SSdata.Ldata[ii].mk = 3; + } + if (SSdata.Ldata[ii].rg == 0.) { + if (grainFormCode=="IF") { + SSdata.Ldata[ii].rg = 3./2.; + SSdata.Ldata[ii].rb = 3./8.; + } else throw IOException("Grain size missing for a non-ice layer!", AT); + } + // set temperature to 0°C if warmer than 0°C: + if (SSdata.Ldata[ii].tl > Constants::meltfreeze_tk){ + std::cout << "WARNING! Inconsistent input data in caaml-file: Temperature in layer " << ii << ": " << SSdata.Ldata[ii].tl-Constants::meltfreeze_tk + << " degree Celsius. Temperature above 0 degree Celsius not possible! Setting temperature to 0 degree Celsius." << std::endl; + SSdata.Ldata[ii].tl = Constants::meltfreeze_tk; + } + // set lwc to 0 if colder than 0°C: + if (SSdata.Ldata[ii].tl < Constants::meltfreeze_tk && SSdata.Ldata[ii].phiWater > 0 ){ + std::cout << "WARNING! 
Inconsistent input data in caaml-file: LWC: " << SSdata.Ldata[ii].phiWater + << " temperature: " << SSdata.Ldata[ii].tl << " in layer: " << ii << std::endl; + std::cout << "Setting lwc to 0! Since liquid water is not possible in snow below 0 degree Celsius." << std::endl; + //throw IOException("LWC > 0 but temperature below 0°C!", AT); + SSdata.Ldata[ii].phiWater = 0; + } + if (grainFormCode=="FC" && SSdata.Ldata[ii].rg>0.8){ + std::cout << "WARNING! Inconsistent input data in caaml-file: Grain shape 'FC' and grain size > 1.5mm! " + << "Faceted crystals should be smaller than 1.5mm. " << std::endl; + } + if (grainFormCode=="DH" && SSdata.Ldata[ii].rg<0.7){ + std::cout << "WARNING! Inconsistent input data in caaml-file: Grain shape 'DH' and grain size < 1.5mm! " + << "Depth hoar crystals should be larger than 1.5mm. " << std::endl; + } + } + //Compute total number of elements (nodes), height and phiVoids + SSdata.nN = 1; + SSdata.Height = 0.; + for (size_t ii = 0; ii < SSdata.nLayers; ii++) { + SSdata.nN += SSdata.Ldata[ii].ne; + SSdata.Height += SSdata.Ldata[ii].hl; + SSdata.Ldata[ii].phiVoids = 1. - SSdata.Ldata[ii].phiSoil - SSdata.Ldata[ii].phiWater - SSdata.Ldata[ii].phiIce; + } + + if (SSdata.HS_last != IOUtils::nodata){ + if(SSdata.HS_last > SSdata.Height+0.0005 || SSdata.HS_last < SSdata.Height-0.0005){ + std::cout << "WARNING! Inconsistent input data in caaml-file: Snow-height given in caaml:snowPackCond (" << SSdata.HS_last + << " m) is different from the sum of the layer-thicknesses (" << SSdata.Height + << " m)! For the simulation the sum of the layer-thicknesses will be used." 
<< std::endl; + } + } + SSdata.HS_last = SSdata.Height; +} + +/** + * @brief This routine writes the status of the snow cover at program termination and at specified backup times + * @param date current + * @param Xdata + * @param Zdata + * @param forbackup dump Xdata on the go + */ +void CaaMLIO::writeSnowCover(const Date& date, const SnowStation& Xdata, + const ZwischenData& Zdata, const bool& forbackup) +{ + const std::string bak = (forbackup)? "_" + date.toString(mio::Date::NUM) : ""; + std::string snofilename( getFilenamePrefix(Xdata.meta.getStationID().c_str(), o_snowpath) + bak + ".caaml" ); + std::string hazfilename( getFilenamePrefix(Xdata.meta.getStationID().c_str(), o_snowpath) + bak + ".haz" ); + + writeSnowFile(snofilename, date, Xdata); + if (haz_write) SmetIO::writeHazFile(hazfilename, date, Xdata, Zdata); +} + +/** + * @brief This routine writes the status of the snow cover to a caaml-file. + * @param snofilename + * @param date date of the snow-profile + * @param Xdata data structure containing all the data describing a snow-profile (which will be written to + * the caaml-file) + */ +void CaaMLIO::writeSnowFile(const std::string& snofilename, const Date& date, const SnowStation& Xdata) +{ + pugi::xml_document doc; + // Generate XML declaration + pugi::xml_node declarationNode = doc.append_child(pugi::node_declaration); + declarationNode.append_attribute("version") = "1.0"; + declarationNode.append_attribute("encoding") = "UTF-8"; + // A valid XML doc must contain a single root node of any name + pugi::xml_node root = doc.append_child( (namespaceCAAML+":SnowProfile").c_str() ); + root.append_attribute("gml:id") = ("SLF_"+Xdata.meta.stationID).c_str(); + root.append_attribute(("xmlns:"+string((const char*)xml_ns_abrev_xsi)).c_str()) = string((const char*)xml_ns_xsi).c_str(); + root.append_attribute(("xmlns:"+string((const char*)xml_ns_abrev_gml)).c_str()) = string((const char*)xml_ns_gml).c_str(); + root.append_attribute(("xmlns:"+string((const 
char*)xml_ns_abrev_caaml)).c_str()) = string((const char*)xml_ns_caaml).c_str(); + root.append_attribute("xmlns:snp") = string((const char*)xml_ns_snp).c_str(); + root.append_attribute(("xmlns:"+string((const char*)xml_ns_abrev_slf)).c_str()) = string((const char*)xml_ns_slf).c_str(); + + // Write profile date + pugi::xml_node dateNode = root.append_child( (namespaceCAAML+":timeRef").c_str() ); + dateNode.append_child( (namespaceCAAML+":recordTime").c_str() ) + .append_child( (namespaceCAAML+":TimeInstant").c_str() ) + .append_child( (namespaceCAAML+":timePosition").c_str() ) + .append_child(pugi::node_pcdata).set_value( date.toString(mio::Date::ISO_TZ).c_str() ); + mio::Date now; + now.setFromSys(); + dateNode.append_child( (namespaceCAAML+":dateTimeReport").c_str() ) + .append_child(pugi::node_pcdata).set_value( now.toString(mio::Date::ISO_TZ).c_str() ); + + //Write srcRef + pugi::xml_node srcNode = root.append_child( (namespaceCAAML+":srcRef").c_str() ) + .append_child( (namespaceCAAML+":Operation").c_str() ); + srcNode.append_attribute("gml:id") = "OPERATION_ID"; + const std::string snowpackString = "SNOWPACK v"+info.version+" compiled on "+info.compilation_date+". 
user: "+info.user; + xmlWriteElement(srcNode,(namespaceCAAML+":name").c_str(),snowpackString.c_str(),"",""); + + // Write station data locRef + writeStationData(root,Xdata); + + // Write stratigraphic profile + pugi::xml_node stratNode = root.append_child( (namespaceCAAML+":snowProfileResultsOf").c_str() ) + .append_child( (namespaceCAAML+":SnowProfileMeasurements").c_str() ); + stratNode.append_attribute("dir") = "top down"; + + //Write custom snow/soil data + pugi::xml_node snowSoilNode = stratNode.append_child( (namespaceCAAML+":metaData").c_str() ) + .append_child( (namespaceCAAML+":customData").c_str() ); + writeCustomSnowSoil(snowSoilNode,Xdata); + + // Write profile depth + sprintf(valueStr,"%.4f",100.*Xdata.cH/Xdata.cos_sl); // cmb + xmlWriteElement(stratNode,(namespaceCAAML+":profileDepth").c_str(),valueStr,"uom","cm"); + + //Write height of snow and Snow Water Equivalent (SWE) + pugi::xml_node tempNode = stratNode.append_child( (namespaceCAAML+":snowPackCond").c_str() ) + .append_child( (namespaceCAAML+":hS").c_str() ) + .append_child( (namespaceCAAML+":Components").c_str() ); + sprintf(valueStr,"%.4f",100.*(Xdata.cH - Xdata.Ground)/Xdata.cos_sl); // cmb + xmlWriteElement(tempNode,(namespaceCAAML+":height").c_str(),valueStr,"uom","cm"); + sprintf(valueStr,"%.2f",Xdata.swe); + xmlWriteElement(tempNode,(namespaceCAAML+":waterEquivalent").c_str(),valueStr,"uom","kgm-2"); + + //Write layers and quantity profiles + writeLayers(stratNode,Xdata); + writeProfiles(stratNode,Xdata); + + // Save XML tree to file. + // Remark: second optional param is indent string to be used; + // default indentation is tab character. + bool saveSucceeded = doc.save_file(snofilename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8); + if(!saveSucceeded){ + std::cout << "Something went wrong with saving the caaml-file: " << snofilename << std::endl; + } +} + + +/** + * @brief Write the custom-snow-soil-data to the caaml-file. 
+ */ +void CaaMLIO::writeCustomSnowSoil(pugi::xml_node& node, const SnowStation& Xdata) +{ + sprintf(valueStr,"%.4f",Xdata.Albedo); + xmlWriteElement(node,(namespaceSNP+":Albedo").c_str(),valueStr,"",""); + sprintf(valueStr,"%.4f",Xdata.SoilAlb); + xmlWriteElement(node,(namespaceSNP+":SoilAlb").c_str(),valueStr,"",""); + sprintf(valueStr,"%.4f",Xdata.BareSoil_z0); + xmlWriteElement(node,(namespaceSNP+":BareSoil_z0").c_str(),valueStr,"uom","m"); + sprintf(valueStr,"%.4f",Xdata.Cdata.height); + xmlWriteElement(node,(namespaceSNP+":CanopyHeight").c_str(),valueStr,"uom","m"); + sprintf(valueStr,"%.4f",Xdata.Cdata.lai); + xmlWriteElement(node,(namespaceSNP+":CanopyLAI").c_str(),valueStr,"",""); + sprintf(valueStr,"%.4f",Xdata.Cdata.BasalArea); + xmlWriteElement(node,(namespaceSNP+":CanopyBasalArea").c_str(),valueStr,"",""); + sprintf(valueStr,"%.4f",Xdata.Cdata.throughfall); + xmlWriteElement(node,(namespaceSNP+":CanopyDirectThroughfall").c_str(),valueStr,"",""); + sprintf(valueStr,"%.4f",Xdata.WindScalingFactor); + xmlWriteElement(node,(namespaceSNP+":WindScalingFactor").c_str(),valueStr,"",""); + sprintf(valueStr,"%d",static_cast(Xdata.ErosionLevel)); + xmlWriteElement(node,(namespaceSNP+":ErosionLevel").c_str(),valueStr,"",""); + sprintf(valueStr,"%.4f",Xdata.TimeCountDeltaHS); + xmlWriteElement(node,(namespaceSNP+":TimeCountDeltaHS").c_str(),valueStr,"",""); +} + +/** + * @brief Write the stratigraphic-layer-data to the caaml-file. 
+ */ +void CaaMLIO::writeLayers(pugi::xml_node& node, const SnowStation& Xdata) +{ + pugi::xml_node stratNode = node.append_child( (namespaceCAAML+":stratProfile").c_str() ); + stratNode.append_child( (namespaceCAAML+":stratMetaData").c_str() ); + + if (!Xdata.Edata.empty()) { + for (size_t ii = Xdata.Edata.size(); ii-->0;) { + const bool snowLayer = (ii >= Xdata.SoilNode); + pugi::xml_node layerNode = stratNode.append_child( (namespaceCAAML+":Layer").c_str() ); + // Write custom layer data + pugi::xml_node customNode = layerNode.append_child( (namespaceCAAML+":metaData").c_str() ) + .append_child( (namespaceCAAML+":customData").c_str() ); + writeCustomLayerData(customNode,Xdata.Edata[ii],Xdata.Ndata[ii]); + + // Write snow and soil layer data + sprintf(layerDepthTopStr,"%.4f",100.*(Xdata.cH - Xdata.Ndata[ii+1].z)/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":depthTop").c_str(),layerDepthTopStr,"uom","cm"); + sprintf(layerThicknessStr,"%.4f",100.*Xdata.Edata[ii].L/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":thickness").c_str(),layerThicknessStr,"uom","cm"); + + if (snowLayer) { + //const unsigned int snowType = ElementData::snowType(Xdata.Edata[ii].dd, Xdata.Edata[ii].sp, Xdata.Edata[ii].rg, Xdata.Edata[ii].mk, Xdata.Edata[ii].theta[WATER], Xdata.Edata[ii].res_wat_cont); + const unsigned int snowType = Xdata.Edata[ii].getSnowType(); + const unsigned int a = snowType/100; + const unsigned int b = (snowType-100*a)/10; + const unsigned int c = snowType-100*a-10*b; + if (c != 2) { + xmlWriteElement(layerNode,(namespaceCAAML+":grainFormPrimary").c_str(),grainShape_valToAbbrev(a).c_str(),"",""); + } else { + xmlWriteElement(layerNode,(namespaceCAAML+":grainFormPrimary").c_str(),"MFcr","",""); + } + xmlWriteElement(layerNode,(namespaceCAAML+":grainFormSecondary").c_str(),grainShape_valToAbbrev(b).c_str(),"",""); + + pugi::xml_node grainSizeNode = layerNode.append_child( (namespaceCAAML+":grainSize").c_str() ); + 
grainSizeNode.append_attribute("uom") = "mm"; + pugi::xml_node compNode = grainSizeNode.append_child( (namespaceCAAML+":Components").c_str() ); + sprintf(layerValStr,"%.3f",2.*Xdata.Edata[ii].rg); + xmlWriteElement(compNode,(namespaceCAAML+":avg").c_str(),layerValStr,"",""); + } + pugi::xml_node timeNode = layerNode.append_child( (namespaceCAAML+":validFormationTime").c_str() ) + .append_child( (namespaceCAAML+":TimeInstant").c_str() ); + xmlWriteElement(timeNode,(namespaceCAAML+":timePosition").c_str(),Xdata.Edata[ii].depositionDate.toString(mio::Date::ISO_TZ).c_str(),"",""); + if (snowLayer) { + xmlWriteElement(layerNode,(namespaceCAAML+":hardness").c_str(),hardness_valToCode(Xdata.Edata[ii].hard).c_str(),"uom",""); //HACK: check values... seem always the same! + } + xmlWriteElement(layerNode,(namespaceCAAML+":wetness").c_str(),lwc_valToCode(Xdata.Edata[ii].theta[WATER]).c_str(),"uom",""); + } + } +} + +/** + * @brief Write the custom-layer-data to the caaml-file. + */ +void CaaMLIO::writeCustomLayerData(pugi::xml_node& node, const ElementData& Edata, const NodeData& Ndata) +{ + sprintf(valueStr,"%.4f",Edata.theta[SOIL]); + xmlWriteElement(node,(namespaceSNP+":phiSoil").c_str(),valueStr,"",""); + sprintf(valueStr,"%.4f",Edata.soil[2]); + xmlWriteElement(node,(namespaceSNP+":SoilRho").c_str(),valueStr,"uom","kgm-3"); + sprintf(valueStr,"%.4f",Edata.soil[0]); + xmlWriteElement(node,(namespaceSNP+":SoilK").c_str(),valueStr,"uom","Wm-1s-1"); + sprintf(valueStr,"%.4f",Edata.soil[1]); + xmlWriteElement(node,(namespaceSNP+":SoilC").c_str(),valueStr,"uom","Jkg-1"); + sprintf(valueStr,"%.4f",Edata.rb); + xmlWriteElement(node,(namespaceSNP+":bondSize").c_str(),valueStr,"",""); + sprintf(valueStr,"%.2f",Edata.dd); + xmlWriteElement(node,(namespaceSNP+":dendricity").c_str(),valueStr,"",""); + sprintf(valueStr,"%.2f",Edata.sp); + xmlWriteElement(node,(namespaceSNP+":sphericity").c_str(),valueStr,"",""); + sprintf(valueStr,"%4u",static_cast(Edata.mk)); + 
xmlWriteElement(node,(namespaceSNP+":marker").c_str(),valueStr,"",""); + sprintf(valueStr,"%.4f",Ndata.hoar); + xmlWriteElement(node,(namespaceSNP+":SurfaceHoarMass").c_str(),valueStr,"uom","kgm-2"); + xmlWriteElement(node,(namespaceSNP+":ne").c_str(),"1","",""); + sprintf(valueStr,"%.4f",Edata.CDot); + xmlWriteElement(node,(namespaceSNP+":StressRate").c_str(),valueStr,"uom","Nm-2s-1"); + sprintf(valueStr,"%.4f",Edata.metamo); + xmlWriteElement(node,(namespaceSNP+":Metamorphism").c_str(),valueStr,"",""); + sprintf(valueStr,"%5u",Edata.ID); + xmlWriteElement(node,(namespaceSNP+":ID").c_str(),valueStr,"",""); +} + +/** + * @brief Write the density-, temperature, lwc-, SSA- and strength-profile to the caaml-file. + */ +void CaaMLIO::writeProfiles(pugi::xml_node& node, const SnowStation& Xdata) +{ + // temperature profile + pugi::xml_node tempNode = node.append_child( (namespaceCAAML+":tempProfile").c_str() ); + tempNode.append_child( (namespaceCAAML+":tempMetaData").c_str() ); + if (!Xdata.Ndata.empty()) { + for (size_t ii = Xdata.Ndata.size(); ii-->0;) { + pugi::xml_node obsNode = tempNode.append_child( (namespaceCAAML+":Obs").c_str() ); + sprintf(layerDepthTopStr,"%.4f",100*(Xdata.cH - Xdata.Ndata[ii].z)/Xdata.cos_sl); // cmb + xmlWriteElement(obsNode,(namespaceCAAML+":depth").c_str(),layerDepthTopStr,"uom","cm"); + sprintf(valueStr,"%.3f",unitConversion(Xdata.Ndata[ii].T,(char*)"degK",(char*)"degC")); + xmlWriteElement(obsNode,(namespaceCAAML+":snowTemp").c_str(),valueStr,"uom","degC"); + } + }//end temperature profile + + // density profile; + pugi::xml_node densityNode = node.append_child( (namespaceCAAML+":densityProfile").c_str() ); + pugi::xml_node metaNode1 = densityNode.append_child( (namespaceCAAML+":densityMetaData").c_str() ); + xmlWriteElement(metaNode1,(namespaceCAAML+":methodOfMeas").c_str(),"other","",""); + if (!Xdata.Edata.empty()) { + for (size_t ii = Xdata.Edata.size(); ii-->0;) { + pugi::xml_node layerNode = densityNode.append_child( 
(namespaceCAAML+":Layer").c_str() ); + sprintf(layerDepthTopStr,"%.4f",100*(Xdata.cH - Xdata.Ndata[ii+1].z)/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":depthTop").c_str(),layerDepthTopStr,"uom","cm"); + sprintf(layerThicknessStr,"%.4f",100*Xdata.Edata[ii].L/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":thickness").c_str(),layerThicknessStr,"uom","cm"); + sprintf(valueStr,"%.2f",Xdata.Edata[ii].Rho); + xmlWriteElement(layerNode,(namespaceCAAML+":density").c_str(),valueStr,"uom","kgm-3"); + } + }//end density profile + + + // lwc profile; + pugi::xml_node lwcNode = node.append_child( (namespaceCAAML+":lwcProfile").c_str() ); + pugi::xml_node metaNode2 = lwcNode.append_child( (namespaceCAAML+":lwcMetaData").c_str() ); + xmlWriteElement(metaNode2,(namespaceCAAML+":methodOfMeas").c_str(),"other","",""); + if (!Xdata.Edata.empty()) { + for (size_t ii = Xdata.Edata.size(); ii-->0;) { + pugi::xml_node layerNode = lwcNode.append_child( (namespaceCAAML+":Layer").c_str() ); + sprintf(layerDepthTopStr,"%.4f",100*(Xdata.cH - Xdata.Ndata[ii+1].z)/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":depthTop").c_str(),layerDepthTopStr,"uom","cm"); + sprintf(layerThicknessStr,"%.4f",100*Xdata.Edata[ii].L/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":thickness").c_str(),layerThicknessStr,"uom","cm"); + sprintf(valueStr,"%.2f",Xdata.Edata[ii].theta[2]*100); + xmlWriteElement(layerNode,(namespaceCAAML+":lwc").c_str(),valueStr,"uom","% by Vol"); + } + }//end lwc profile + + + // specSurfAreaProfile profile; + pugi::xml_node ssaNode = node.append_child( (namespaceCAAML+":specSurfAreaProfile").c_str() ); + pugi::xml_node metaNode3 = ssaNode.append_child( (namespaceCAAML+":specSurfAreaMetaData").c_str() ); + xmlWriteElement(metaNode3,(namespaceCAAML+":methodOfMeas").c_str(),"other","",""); + if (!Xdata.Edata.empty()) { + for (size_t ii = Xdata.Edata.size(); ii-->0;) { + pugi::xml_node layerNode = 
ssaNode.append_child( (namespaceCAAML+":Layer").c_str() ); + sprintf(layerDepthTopStr,"%.4f",100*(Xdata.cH - Xdata.Ndata[ii+1].z)/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":depthTop").c_str(),layerDepthTopStr,"uom","cm"); + sprintf(layerThicknessStr,"%.4f",100*Xdata.Edata[ii].L/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":thickness").c_str(),layerThicknessStr,"uom","cm"); + double ogs = Xdata.Edata[ii].ogs; + double rho = Xdata.Edata[ii].Rho; + double ssa = 6.0/(rho*ogs/1000.0); //conversion from optical grain size to ssa + sprintf(valueStr,"%.2f",ssa); + xmlWriteElement(layerNode,(namespaceCAAML+":specSurfArea").c_str(),valueStr,"uom","m2kg-1"); + } + }//end specSurfAreaProfile + + // strengthProfile; + pugi::xml_node strengthNode = node.append_child( (namespaceCAAML+":strengthProfile").c_str() ); + pugi::xml_node metaNode4 = strengthNode.append_child( (namespaceCAAML+":strengthMetaData").c_str() ); + xmlWriteElement(metaNode4,(namespaceCAAML+":strengthType").c_str(),"shear","",""); + xmlWriteElement(metaNode4,(namespaceCAAML+":methodOfMeas").c_str(),"other","",""); + if (!Xdata.Edata.empty()) { + for (size_t ii = Xdata.Edata.size(); ii-->0;) { + pugi::xml_node layerNode = strengthNode.append_child( (namespaceCAAML+":Layer").c_str() ); + sprintf(layerDepthTopStr,"%.4f",100*(Xdata.cH - Xdata.Ndata[ii+1].z)/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":depthTop").c_str(),layerDepthTopStr,"uom","cm"); + sprintf(layerThicknessStr,"%.4f",100*Xdata.Edata[ii].L/Xdata.cos_sl); // cmb + xmlWriteElement(layerNode,(namespaceCAAML+":thickness").c_str(),layerThicknessStr,"uom","cm"); + sprintf(valueStr,"%.2f",(Xdata.Edata[ii].s_strength)*1000); //conversion from kPa to Nm-2: *1000 + xmlWriteElement(layerNode,(namespaceCAAML+":strengthValue").c_str(),valueStr,"uom","Nm-2"); + } + }//end strengthProfile +} + +/** + * @brief Write the station-data to the caaml-file. 
+ */ +void CaaMLIO::writeStationData(pugi::xml_node& root, const SnowStation& Xdata) +{ + pugi::xml_node node = root.append_child( (namespaceCAAML+":locRef").c_str() ); + node.append_attribute("gml:id") = ("SLF_"+Xdata.meta.stationID+"_1").c_str(); + xmlWriteElement(node,(namespaceCAAML+":name").c_str(),(const char*) Xdata.meta.stationName.c_str(),"",""); + xmlWriteElement(node,(namespaceCAAML+":obsPointSubType").c_str(),"","",""); + + pugi::xml_node elevNode = node.append_child( (namespaceCAAML+":validElevation").c_str() ) + .append_child( (namespaceCAAML+":ElevationPosition").c_str() ); + elevNode.append_attribute("uom") = "m"; + char elevStr[5]; + sprintf(elevStr,"%.0f",Xdata.meta.position.getAltitude()); + xmlWriteElement(elevNode,(namespaceCAAML+":position").c_str(),elevStr,"",""); + + pugi::xml_node aspectNode = node.append_child( (namespaceCAAML+":validAspect").c_str() ) + .append_child( (namespaceCAAML+":AspectPosition").c_str() ); + xmlWriteElement(aspectNode,(namespaceCAAML+":position").c_str(),IOUtils::bearing(Xdata.meta.getAzimuth()).c_str(),"",""); + + if(Xdata.meta.getSlopeAngle()!=IOUtils::nodata){ + pugi::xml_node slopeNode = node.append_child( (namespaceCAAML+":validSlopeAngle").c_str() ) + .append_child( (namespaceCAAML+":SlopeAnglePosition").c_str() ); + slopeNode.append_attribute("uom") = "deg"; + char slopeStr[4]; + sprintf(slopeStr,"%.0f",Xdata.meta.getSlopeAngle()); + xmlWriteElement(slopeNode,(namespaceCAAML+":position").c_str(),slopeStr,"",""); + } + pugi::xml_node pointNode = node.append_child( (namespaceCAAML+":pointLocation").c_str() ) + .append_child( (namespaceGML+":Point").c_str() ); + pointNode.append_attribute("gml:id") = ("SLF_"+Xdata.meta.stationID+"_2").c_str(); + pointNode.append_attribute("srsName") = "urn:ogc:def:crs:OGC:1.3:CRS84"; + pointNode.append_attribute("srsDimension") = "2"; + char posStr[30]; + sprintf(posStr,"%f %f",Xdata.meta.position.getLon(),Xdata.meta.position.getLat()); + 
xmlWriteElement(pointNode,"gml:pos",posStr,"",""); +} + +void CaaMLIO::writeTimeSeries(const SnowStation& /*Xdata*/, const SurfaceFluxes& /*Sdata*/, const CurrentMeteo& /*Mdata*/, + const ProcessDat& /*Hdata*/, const double /*wind_trans24*/) +{ + throw IOException("Nothing implemented here!", AT); +} + +void CaaMLIO::writeProfile(const Date& /*date*/, const SnowStation& /*Xdata*/) +{ + throw IOException("Nothing implemented here!", AT); +} + +bool CaaMLIO::writeHazardData(const std::string& /*stationID*/, const std::vector& /*Hdata*/, + const std::vector& /*Hdata_ind*/, const size_t& /*num*/) +{ + throw IOException("Nothing implemented here!", AT); +} + +/** + * @brief Convert from liquid water content code to value + * @author Adrien Gaudard + * @param code Liquid water content code (one character) + * return Liquid water content value (fraction) + */ +double CaaMLIO::lwc_codeToVal(const char* code) +{ + if (!strcmp(code,"D")) return 0.; + if (!strcmp(code,"M")) return 0.01; + if (!strcmp(code,"W")) return 0.03; + if (!strcmp(code,"V")) return 0.08; + if (!strcmp(code,"S")) return 0.15; + + throw IOException("Unrecognized liquid water content code.", AT); +} + +/** + * @brief Convert from liquid water content value to code + * @author Adrien Gaudard + * @param val Liquid water content value (fraction) + * return Liquid water content code (one character) + */ +std::string CaaMLIO::lwc_valToCode(const double val) +{ + if (val == 0.00) return "D"; + if (val < 0.03) return "M"; + if (val < 0.08) return "W"; + if (val < 0.15) return "V"; + if (val < 1.00) return "S"; + + throw IOException("Invalid liquid water content value.", AT); +} + +/** + * @brief Convert from hardness code to value + * @author Adrien Gaudard + * @param code Hardness code + * return Hardness value (1 to 6) + */ +double CaaMLIO::hardness_codeToVal(char* code) +{ + const std::string codeString(code); + if( codeString == "n/a") + return IOUtils::nodata; + + double val = 0.; + unsigned int n = 0; + 
char* c[2]; + c[0] = strtok(code,"-"); + c[1] = strtok(NULL,"-"); + + for (size_t i=0; i<2; i++) { + if (c[i]) { + n++; + if (!strcmp(c[i],"F")) { + val += 1.; + } else if (!strcmp(c[i],"4F")) { + val += 2.; + } else if (!strcmp(c[i],"1F")) { + val += 3.; + } else if (!strcmp(c[i],"P")) { + val += 4.; + } else if (!strcmp(c[i],"K")) { + val += 5.; + } else if (!strcmp(c[i],"I")) { + val += 6.; + } else { + throw IOException("Unrecognized hardness code.", AT); + } + } + } + return val/n; +} + +/** + * @brief Convert from hardness value to code + * @author Adrien Gaudard + * @param val Hardness value (1 to 6) + * return Hardness code + */ +std::string CaaMLIO::hardness_valToCode(const double val) +{ + if (val <= 1.0) return "F"; + if (val <= 1.5) return "F-4F"; + if (val <= 2.0) return "4F"; + if (val <= 2.5) return "4F-1F"; + if (val <= 3.0) return "1F"; + if (val <= 3.5) return "1F-P"; + if (val <= 4.0) return "P"; + if (val <= 4.5) return "P-K"; + if (val <= 5.0) return "K"; + if (val <= 5.5) return "K-I"; + if (val <= 6.0) return "I"; + if (val == IOUtils::nodata) return "n/a"; + std::cout<< "Hardness value: " << val << std::endl; + throw IOException("Unrecognized hardness value: ", AT); +} + +/** + * @brief Convert from grain shape code to values (sphericity, dendricity, marker) + * @author Adrien Gaudard + * @param[in] code Grain shape code + * @param[out] sp sphericity + * @param[out] dd dendricity + * @param[out] mk micro-structure marker + */ +void CaaMLIO::grainShape_codeToVal(const std::string& code, double &sp, double &dd, unsigned short int &mk) +{ + //first check some special grain shapes (with 4 letters): + if (code=="PPgp") { + sp = 1.; dd = 0.; mk = 4; + } else { + //otherwise take only the first two letters of the code + const std::string code2Letters = code.substr(0,2); + if (code2Letters=="PP") { + sp = 0.5; dd = 1.; mk = 0; + } else if (code2Letters=="DF") { + sp = 0.5; dd = 0.5; mk = 0; + } else if (code2Letters=="RG") { + sp = 1.; dd = 0.; mk = 
2; //why marker=2? + } else if (code2Letters=="FC") { + sp = 0.2; dd = 0.; mk = 1; + } else if (code2Letters=="DH") { //FC or DH is distinguished by grain size. A message is given if there is DH with grain size < 1.5mm. or if FC with grain size > 1.5 mm! + sp = 0.2; dd = 0.; mk = 1; + } else if (code2Letters=="SH") { + sp = 0.; dd = 0.; mk = 3; + } else if (code2Letters=="MF") { + sp = 1.; dd = 0.; mk = 12; + } else if (code2Letters=="IF") { + sp = 1.; dd = 0.; mk = 7; + } else if (code2Letters=="MM") { + sp = 1.; dd = 0.; mk = 2; //which values make sense here??? + } else { + throw IOException("Unrecognized grain shape code.", AT); + } + } +} + + +/** + * @brief Convert from grain shape value to code + * @author Adrien Gaudard + * @param var Grain shape value + * return Grain shape code + */ +std::string CaaMLIO::grainShape_valToAbbrev(const unsigned int var) +{ + if (var == 0) return "PPgp"; + if (var == 1) return "PP"; + if (var == 2) return "DF"; + if (var == 3) return "RG"; + if (var == 4) return "FC"; + if (var == 5) return "DH"; + if (var == 6) return "SH"; + if (var == 7) return "MF"; + if (var == 8) return "IF"; + if (var == 9) return "FCxr"; + + throw IOException("Unrecognized grain shape code.", AT); +} + +/** + * @brief Convert from grain shape values (sphericity, dendricity, marker) to two-character code + * @author Adrien Gaudard + * @param var Grain shape values (sphericity, dendricity, marker) (AMBIGUOUS) + * return Grain shape two-character code (NOT ALL REPRESENTED) + */ +std::string CaaMLIO::grainShape_valToAbbrev_old(const double* var) +{ + const double sp = ((int)(var[0]*10+0.5))/10.; + const double dd = ((int)(var[1]*10+0.5))/10.; + const double mk = ((int)(var[2]*10+0.5))/10.; + + if (sp == 0.5 && dd == 1. && mk == 0.) return "PP"; + if (sp == 0.5 && dd == 0.5 && mk == 0.) return "DF"; + if (sp == 1. && dd == 0. && (mk == 2. || mk == 12.)) return "RG"; + if (sp == 0. && dd == 0. && mk == 1.) return "FC"; + if (sp == 0. && dd == 0. 
&& mk == 1.) return "DH"; + if (sp == 0. && dd == 0. && mk == 1.) return "SH"; + if (sp == 1. && dd == 0. && mk == 2.) return "MF"; + if (sp == 1. && dd == 0. && mk == 2.) return "IF"; + + throw IOException("Unrecognized set of grain shape values.", AT); +} + +/** + * @brief Add a child-xml-element with a value and optionally an attribute to an xml-element. Example: + * Current xml-element: + * New child-element which will be added: 1.91 + * @param node pointing to the xml-element where we want to add a child-element. e.g. "caaml:Layer" + * @param name name of the new xml-element. e.g. "caaml:depthTop" + * @param content value of the new xml-elemnt. e.g. "1.91" + * @param att_name name of the attribute. e.g. "uom" + * @return att_val value of the attribute. e.g. "cm" + */ +void CaaMLIO::xmlWriteElement(pugi::xml_node& node, const char* name, const char* content, const char* att_name, const char* att_val) +{ + pugi::xml_node child = node.append_child( name ); + child.append_child(pugi::node_pcdata).set_value( content ); + if (strcmp(att_name,"")) //ie: string not empty + child.append_attribute( att_name ) = att_val; +} + +/** + * @brief Check if a certain path exists in an xml-file. + * @param path path to check. e.g. "/caaml:SnowProfile/caaml:locRef" + * return true if the path exists + */ +bool CaaMLIO::xmlDoesPathExist(const std::string& path) +{ + const pugi::xml_node node = inDoc.first_element_by_path(path.c_str()); + if( node.empty() ){ + return false; + } + return true; +} + +/** + * @brief Read a double-value from a path. + * @param path e.g. "/caaml:SnowProfile/caaml:snowProfileResultsOf/.../caaml:customData/snp" + * @param property e.g. 
"Albedo" + * @param dflt default-value to return if the path does not exist in the xml-file + * return the value which was read in (or the default-value) + */ +double CaaMLIO::xmlReadValueFromPath (const string& xpath, const std::string& property, const double& dflt) +{ + double val = IOUtils::nodata; + const std::string path( SnowData_xpath+xpath+":"+property ); + const pugi::xml_node node = inDoc.first_element_by_path(path.c_str()); + const char* valStr = node.child_value(); + if (node.empty() ){ + val=dflt; + }else{ + sscanf(valStr, "%lf", &val); + } + //std::cout << property << ": " << val << "." << std::endl; + return val; +} + +/** + * @brief Read an int-value from a path. + * @param path e.g. "/caaml:SnowProfile/caaml:snowProfileResultsOf/.../caaml:customData/snp" + * @param property e.g. "Albedo" + * @param dflt default-value to return if the path does not exist in the xml-file + * return the value which was read in (or the default-value) + */ +int CaaMLIO::xmlReadValueFromPath (const string& xpath, const std::string& property, const int& dflt) +{ + int val = IOUtils::inodata; + const std::string path( SnowData_xpath+xpath+":"+property ); + const pugi::xml_node node = inDoc.first_element_by_path(path.c_str()); + const char* valStr = node.child_value(); + if (node.empty() ){ + val=dflt; + }else{ + sscanf(valStr, "%d", &val); + } + //std::cout << property << ": " << val << "." << std::endl; + return val; +} + +/** + * @brief Read an attribute from a path. + * @param path e.g. "/caaml:SnowProfile/caaml:locRef" + * @param attributeName e.g. "gml:id" + * return the value of the attribute which was read in e.g. "OPERATION_ID" + */ +std::string CaaMLIO::xmlReadAttributeFromPath (const string& path, const std::string& attributeName) +{ + const pugi::xml_node node = inDoc.first_element_by_path(path.c_str()); + const std::string attrStr(node.attribute(attributeName.c_str()).as_string()); + //std::cout << attributeName << ": " << attrStr << "." 
<< std::endl; + return attrStr; +} + +/** + * @brief Read a value from an xml-element (node) and convert the unit if wished. + * @param node pointing to the xml-element we want to read the value from + * @param propertyName only read the value from the xml-element if its name is equal to this parameter + * @param variableToSet output + * @param unitOut wished unit of the variable + * @param unitMeasured the (current) unit of the variable + * @param factor this factor is applied to the variable + * return true if value was read successfully + */ +bool CaaMLIO::xmlReadValueFromNode (const pugi::xml_node node, const std::string propertyName, double& variableToSet, + const std::string unitOut,const std::string unitMeasured, const double factor) +{ + const std::string fieldName( node.name() ); + if (fieldName == propertyName){ + double temp; + sscanf((const char*) node.child_value(),"%lf",&temp); + if (unitOut != ""){ + char* unitOfMeasurement = (char*)unitMeasured.c_str(); + if(unitMeasured==""){ + unitOfMeasurement = (char*) node.attribute("uom").as_string(); + } + temp = unitConversion(temp,unitOfMeasurement,(char*) unitOut.c_str()); + } + variableToSet = temp*factor; + //std::cout << propertyName << ": " << variableToSet << unitOut << std::endl; + return true; + } + return false; +} + +/** + * @brief Print all the important data which was read in from the caaml-file. Just to check if + * everything was read in correctly. + * @param SSdata contains all the data which was read in from the caaml-file. + */ +bool CaaMLIO::checkWhatWasReadIn(SN_SNOWSOIL_DATA& SSdata) +{ + std::cout << "Summary of all snow-soil-data (from Caaml-file): " << std::endl; + std::cout << SSdata.toString() << std::endl; + return true; +} diff --git a/third_party/snowpack/plugins/CaaMLIO.h b/third_party/snowpack/plugins/CaaMLIO.h index b8c154eb..fb388f9f 100644 --- a/third_party/snowpack/plugins/CaaMLIO.h +++ b/third_party/snowpack/plugins/CaaMLIO.h @@ -18,17 +18,19 @@ along with MeteoIO. 
If not, see . #ifndef CAAMLIO_H #define CAAMLIO_H +#include #include "../Constants.h" #include "../Hazard.h" -#include "SmetIO.h" #include "SnowpackIOInterface.h" -#include +#include "SmetIO.h" #include -#include -#include -#include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Weffc++" +#include +#pragma GCC diagnostic pop + /** * @class CaaMLIO @@ -43,14 +45,11 @@ class CaaMLIO : public SnowpackIOInterface { public: CaaMLIO(const SnowpackConfig& i_cfg, const RunInfo& run_info); CaaMLIO(const CaaMLIO&); - ~CaaMLIO() throw(); - - CaaMLIO& operator=(const CaaMLIO&); ///& vec_width, std::vector& vec_precision) const; - std::string getFilenamePrefix(const std::string& fnam, const std::string& path, const bool addexp=true) const; - bool read_snocaaml(const std::string& snofilename, const std::string& stationID, SN_SNOWSOIL_DATA& SSdata); - void writeSnowFile(const std::string& snofilename, const mio::Date& date, const SnowStation& Xdata, - const bool aggregate); - - const RunInfo info; - std::string i_snowpath, sw_mode, o_snowpath, experiment; - bool useSoilLayers, perp_to_slope, aggregate_caaml; - /*static const*/ double in_tz; //plugin specific time zones - std::string snow_prefix, snow_ext; //for the file naming scheme - double caaml_nodata; //plugin specific no data value - - xmlDocPtr in_doc; - xmlXPathContextPtr in_xpathCtx; - xmlCharEncoding in_encoding; - static const xmlChar *xml_ns_caaml, *xml_ns_abrev_caaml; - static const xmlChar *xml_ns_gml, *xml_ns_abrev_gml; - static const xmlChar *xml_ns_xsi, *xml_ns_abrev_xsi; - static const xmlChar *xml_ns_slf, *xml_ns_abrev_slf; - static const xmlChar *xml_ns_snp, *xml_ns_abrev_snp; - static const std::string TimeData_xpath, StationMetaData_xpath, SnowData_xpath; - xmlNodeSetPtr xmlGetData(const std::string& path); + //functions for reading caaml-files: + bool read_snocaaml(const std::string& snofilename, const std::string& stationID, SN_SNOWSOIL_DATA& SSdata); + void openIn_CAAML(const std::string& 
in_snowfile); mio::Date xmlGetDate(); mio::StationData xmlGetStationData(const std::string& stationID); - double xmlSetVal(const std::string& xpath, const std::string& property, const double& dflt); - int xmlSetVal(const std::string& xpath, const std::string& property, const int& dflt); void setCustomSnowSoil(SN_SNOWSOIL_DATA& Xdata); + void xmlReadLayerData(SN_SNOWSOIL_DATA& SSdata); + void adjustToSlopeAngle(SN_SNOWSOIL_DATA& SSdata); // #cmb + LayerData xmlGetLayer(pugi::xml_node nodeLayer, std::string& grainFormCode); bool getLayersDir(); - LayerData xmlGetLayer(xmlNodePtr cur); - void getProfiles(const std::string path, std::vector &depths, std::vector &val); - void setProfileVal(std::vector &Layers, std::vector > depths, std::vector > val); - void setCustomLayerData(LayerData &Layer); - void setDepositionDates(std::vector &Layers, const mio::Date); - - void xmlWriteElement(const xmlTextWriterPtr writer, const char* name, const char* content, const char* att_name, const char* att_val); - // void writeDate(const xmlTextWriterPtr writer, const mio::Date date); - void writeDate(const xmlTextWriterPtr writer, const char* att_name, const char* att_val); - void writeCustomSnowSoil(const xmlTextWriterPtr writer, const SnowStation& Xdata); - void writeLayers(const xmlTextWriterPtr writer, const SnowStation& Xdata); - void writeCustomLayerData(const xmlTextWriterPtr writer, const ElementData& Edata, const NodeData& Ndata); - void writeProfiles(const xmlTextWriterPtr writer, const SnowStation& Xdata); - void writeStationData(const xmlTextWriterPtr writer, const SnowStation& Xdata); + void getAndSetProfile(const std::string path, const std::string name,const bool directionTopDown, + const bool isRangeMeasurement,std::vector& Layers); + bool xmlGetProfile(const std::string path, const std::string name, std::vector& zVec, std::vector& valVec); + void estimateValidFormationTimesIfNotSetYet(std::vector &Layers, const mio::Date); + void 
checkAllDataForConsistencyAndSetMissingValues( SN_SNOWSOIL_DATA& SSdata ); + bool checkWhatWasReadIn(SN_SNOWSOIL_DATA& SSdata); + + //functions for writing caaml-file: + void writeSnowFile(const std::string& snofilename, const mio::Date& date, const SnowStation& Xdata); + void writeCustomSnowSoil(pugi::xml_node& node, const SnowStation& Xdata); + void writeLayers(pugi::xml_node& node, const SnowStation& Xdata); + void writeCustomLayerData(pugi::xml_node& node, const ElementData& Edata, const NodeData& Ndata); + void writeProfiles(pugi::xml_node& node, const SnowStation& Xdata); + void writeStationData(pugi::xml_node& root, const SnowStation& Xdata); double lwc_codeToVal(const char* code); std::string lwc_valToCode(const double val); @@ -124,8 +104,35 @@ class CaaMLIO : public SnowpackIOInterface { std::string grainShape_valToAbbrev(const unsigned int var); std::string grainShape_valToAbbrev_old(const double* var); - char layerDepthTopStr[10], layerThicknessStr[10], layerValStr[10], valueStr[10], dateStr[30]; + //xml functions: + double xmlReadValueFromPath(const std::string& xpath, const std::string& property, const double& dflt); + int xmlReadValueFromPath(const std::string& xpath, const std::string& property, const int& dflt); + void xmlWriteElement(pugi::xml_node& node, const char* name, const char* content, const char* att_name, const char* att_val); + bool xmlDoesPathExist(const std::string& path); + bool xmlReadValueFromNode(const pugi::xml_node node, const std::string propertyName, double& variableToSet, + const std::string unitOut = "",const std::string unitMeasured = "", const double factor=1.0); + std::string xmlReadAttributeFromPath (const std::string& path, const std::string& attributeName); + + const RunInfo info; + std::string i_snowpath, o_snowpath, experiment; + double i_max_element_thickness; + bool caaml_writeout_as_readin, haz_write; + /*static const*/ double in_tz; //plugin specific time zones + + pugi::xml_document inDoc; + pugi::xml_encoding 
inEncoding; + + //charEncoding in_encoding; + static const char *xml_ns_caaml, *xml_ns_abrev_caaml; + static const char *xml_ns_gml, *xml_ns_abrev_gml; + static const char *xml_ns_xsi, *xml_ns_abrev_xsi; + static const char *xml_ns_slf, *xml_ns_abrev_slf; + static const char *xml_ns_snp, *xml_ns_abrev_snp; + static const std::string TimeData_xpath, StationMetaData_xpath, SnowData_xpath; + char layerDepthTopStr[10], layerThicknessStr[10], layerValStr[10], valueStr[10]; + double hoarDensitySurf; + std::vector grainForms; }; #endif //End of CAAMLIO.h diff --git a/third_party/snowpack/plugins/ImisDBIO.cc b/third_party/snowpack/plugins/ImisDBIO.cc index 7084c7a2..66223de7 100644 --- a/third_party/snowpack/plugins/ImisDBIO.cc +++ b/third_party/snowpack/plugins/ImisDBIO.cc @@ -128,7 +128,7 @@ bool ImisDBIO::snowCoverExists(const std::string& /*i_snowfile*/, const std::str } void ImisDBIO::readSnowCover(const std::string& /*i_snowfile*/, const std::string& /*stationID*/, - SN_SNOWSOIL_DATA& /*SSdata*/, ZwischenData& /*Zdata*/) + SN_SNOWSOIL_DATA& /*SSdata*/, ZwischenData& /*Zdata*/, const bool& read_salinity) { throw IOException("Nothing implemented here!", AT); } @@ -176,7 +176,7 @@ void ImisDBIO::insertProfile(const std::vector &Pdata) const occi::Date calcDate( OracleDate(info.computation_date) ); const std::string stat_abk( Pdata[0].stationname ); const unsigned char stao_nr = Pdata[0].loc_for_snow; - const double version = atof( info.version.c_str() ); + const double version = info.version_num; //check that the station can really be an IMIS station if(stat_abk.size()>4 || stat_abk.find_first_of("0123456789")!=string::npos) @@ -212,7 +212,7 @@ void ImisDBIO::insertProfile(const std::vector &Pdata) stmt->executeUpdate(); // execute the statement stmt } catch (const exception& e) { cerr << "[E] SDB profile for station " << stat_abk << stao_nr << " at " << Pdata[0].profileDate.toString(mio::Date::ISO); - cerr << "\tsnowpack_version: " << fixed << setw(12) << 
setprecision(3) << info.version << "\tcalculation_date: " << Pdata[ii].depositionDate.toString(mio::Date::ISO); + cerr << "\tsnowpack_version: " << info.version << "\tcalculation_date: " << Pdata[ii].depositionDate.toString(mio::Date::ISO); print_Profile_query(Pdata[ii]); throw; //rethrow the exception } @@ -222,7 +222,7 @@ void ImisDBIO::insertProfile(const std::vector &Pdata) (stmt->getConnection())->commit(); } catch (const exception& e) { cerr << "[E] SDB profile for station " << stat_abk << stao_nr << " at " << Pdata[0].profileDate.toString(mio::Date::ISO); - cerr << "\tsnowpack_version: " << fixed << setw(12) << setprecision(3) << info.version << "\tcalculation_date: " << Pdata[ii].depositionDate.toString(mio::Date::ISO); + cerr << "\tsnowpack_version: " << info.version << "\tcalculation_date: " << Pdata[ii].depositionDate.toString(mio::Date::ISO); throw; //rethrow the exception } } @@ -455,7 +455,7 @@ void ImisDBIO::insertHdata(const std::string& stationName, const std::string& st const size_t& num) { unsigned int rows_inserted = 0; - const double version = atof( info.version.c_str() ); + const double version = info.version_num; int statNum = 0; IOUtils::convertString(statNum, stationNumber); const occi::Date computationdate( OracleDate( info.computation_date ) ); @@ -569,7 +569,7 @@ void ImisDBIO::insertHdata(const std::string& stationName, const std::string& st rows_inserted += stmt->executeUpdate(); // execute the statement stmt } catch (const exception& e) { cerr << "[E] SDB for station " << stationName << statNum << " at " << Hdata[i].date.toString(mio::Date::ISO); - cerr << "\tsnowpack_version: " << fixed << setw(12) << setprecision(3) << info.version << "\tcalculation_date: " << info.computation_date.toString(mio::Date::ISO); + cerr << "\tsnowpack_version: " << info.version << "\tcalculation_date: " << info.computation_date.toString(mio::Date::ISO); print_Hdata_query(Hdata[i], Hdata_ind[i]); throw; //rethrow the exception } @@ -579,7 +579,7 @@ void 
ImisDBIO::insertHdata(const std::string& stationName, const std::string& st (stmt->getConnection())->commit(); } catch (const exception& e) { cerr << "[E] Commit to SDB failed for station " << stationName << statNum << " after " << Hdata[i].date.toString(mio::Date::ISO); - cerr << "\tsnowpack_version: " << fixed << setw(12) << setprecision(3) << info.version << "\tcalculation_date: " << info.computation_date.toString(mio::Date::ISO); + cerr << "\tsnowpack_version: " << info.version << "\tcalculation_date: " << info.computation_date.toString(mio::Date::ISO); throw; //rethrow the exception } } diff --git a/third_party/snowpack/plugins/ImisDBIO.h b/third_party/snowpack/plugins/ImisDBIO.h index 16bd1857..9bbfa662 100644 --- a/third_party/snowpack/plugins/ImisDBIO.h +++ b/third_party/snowpack/plugins/ImisDBIO.h @@ -21,14 +21,13 @@ #ifndef IMISDBIO_H #define IMISDBIO_H -#include "../DataClasses.h" #include "SnowpackIOInterface.h" -#include +#include "../DataClasses.h" #include #include +#include class ImisDBIO : public SnowpackIOInterface{ - public: ImisDBIO(const SnowpackConfig& i_cfg, const RunInfo& run_info); ImisDBIO(const ImisDBIO& in); @@ -37,7 +36,7 @@ class ImisDBIO : public SnowpackIOInterface{ virtual bool snowCoverExists(const std::string& i_snowfile, const std::string& stationID) const; virtual void readSnowCover(const std::string& i_snowfile, const std::string& stationID, - SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata); + SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata, const bool& read_salinity); virtual void writeSnowCover(const mio::Date& date, const SnowStation& Xdata, const ZwischenData& Zdata, const bool& forbackup=false); diff --git a/third_party/snowpack/plugins/SmetIO.cc b/third_party/snowpack/plugins/SmetIO.cc index ccc89e53..651b4c72 100644 --- a/third_party/snowpack/plugins/SmetIO.cc +++ b/third_party/snowpack/plugins/SmetIO.cc @@ -21,6 +21,7 @@ #include "SmetIO.h" #include "../Utils.h" #include "../snowpackCore/Metamorphism.h" +#include 
"../StabilityAlgorithms.h" using namespace std; using namespace mio; @@ -29,9 +30,14 @@ using namespace mio; * @page smet SMET * @section smet_format Format * This plugin reads the SMET files as specified in the - * MeteoIO pre-processing library documentation (under + * MeteoIO pre-processing library documentation (under * "Available plugins and usage", then "smet"). * +* @note There is also a python library, pySMET available, to read SMET files. + * + * @section fluxes_ts Fluxes timeseries + * These files are very regular SMET files with a large number of fields. + * * @section layers_data Layers data * The snow/soil layers file has the structure described below: * - the SMET signature (to identify the file as SMET as well as the format version) @@ -55,11 +61,12 @@ using namespace mio; * Tlayer temperature [K] * Vol_Frac_Ifractional ice volume [0-1] * Vol_Frac_Wfractional water volume [0-1] + * Vol_Frac_WPfractional preferential water volume [0-1] * Vol_Frac_Vfractional voids volume [0-1] * Vol_Frac_Sfractional soil volume [0-1] * Rho_Ssoil density [kg/m3] * Conduc_Smineral phase soil thermal conductivity [w/(mK)] - * HeatCapac_Smineral phase soil thermal capacity [J/K] + * HeatCapac_Smineral phase soil thermal capacity [J/(kg*K)] * * * @@ -89,18 +96,18 @@ using namespace mio; * source = WSL-SLF ;optional key * ProfileDate = 2009-10-01T00:00 ;when was the profile made, see explanations above * HS_Last = 0.0000 ;last measured snow height - * SlopeAngle = 38.0 - * SlopeAzi = 0.0 + * SlopeAngle = 38.0 + * SlopeAzi = 0.0 * nSoilLayerData = 0 ;number of soil layers * nSnowLayerData = 1 ;number of snow layers * SoilAlbedo = 0.20 ;albedo of the exposed soil * BareSoil_z0 = 0.020 ;roughtness length of the exposed soil * CanopyHeight = 0.00 ;height (in m) of the canopy - * CanopyLeafAreaIndex = 0.00 - * CanopyDirectThroughfall = 1.00 + * CanopyLeafAreaIndex = 0.00 + * CanopyDirectThroughfall = 1.00 * WindScalingFactor = 1.00 ;some stations consistently measure a wind that 
is too low - * ErosionLevel = 0 - * TimeCountDeltaHS = 0.000000 + * ErosionLevel = 0 + * TimeCountDeltaHS = 0.000000 * fields = timestamp Layer_Thick T Vol_Frac_I Vol_Frac_W Vol_Frac_V Vol_Frac_S Rho_S Conduc_S HeatCapac_S rg rb dd sp mk mass_hoar ne CDot metamo * [DATA] * 2009-09-19T02:30 0.003399 273.15 0.579671 0.068490 0.351839 0.000000 0.0 0.0 0.0 1.432384 1.028390 0.000000 1.000000 22 0.000000 1 0.000000 0.000000 @@ -138,12 +145,12 @@ using namespace mio; * the profile date. */ SmetIO::SmetIO(const SnowpackConfig& cfg, const RunInfo& run_info) - : fixedPositions(), outpath(), o_snowpath(), snowpath(), experiment(), inpath(), i_snowpath(), sw_mode(), - info(run_info), tsWriters(), + : fixedPositions(), outpath(), o_snowpath(), experiment(), inpath(), i_snowpath(), + metamorphism_model(), variant(), sw_mode(), info(run_info), tsWriters(), acdd(false), in_dflt_TZ(0.), calculation_step_length(0.), ts_days_between(0.), min_depth_subsurf(0.), - avgsum_time_series(false), useCanopyModel(false), useSoilLayers(false), research_mode(false), perp_to_slope(false), + avgsum_time_series(false), useCanopyModel(false), useSoilLayers(false), research_mode(false), perp_to_slope(false), haz_write(true), useReferenceLayer(false), out_heat(false), out_lw(false), out_sw(false), out_meteo(false), out_haz(false), out_mass(false), out_t(false), - out_load(false), out_stab(false), out_canopy(false), out_soileb(false) + out_load(false), out_stab(false), out_canopy(false), out_soileb(false), useRichardsEq(false), enable_pref_flow(false), enable_ice_reservoir(false), read_dsm(false) { cfg.getValue("TIME_ZONE", "Input", in_dflt_TZ); cfg.getValue("CANOPY", "Snowpack", useCanopyModel); @@ -151,19 +158,32 @@ SmetIO::SmetIO(const SnowpackConfig& cfg, const RunInfo& run_info) cfg.getValue("SW_MODE", "Snowpack", sw_mode); cfg.getValue("MIN_DEPTH_SUBSURF", "SnowpackAdvanced", min_depth_subsurf); cfg.getValue("PERP_TO_SLOPE", "SnowpackAdvanced", perp_to_slope); - 
cfg.getValue("AVGSUM_TIME_SERIES", "Output", avgsum_time_series); + cfg.getValue("AVGSUM_TIME_SERIES", "Output", avgsum_time_series, IOUtils::nothrow); cfg.getValue("RESEARCH", "SnowpackAdvanced", research_mode); + cfg.getValue("METAMORPHISM_MODEL", "SnowpackAdvanced", metamorphism_model, IOUtils::nothrow); + cfg.getValue("VARIANT", "SnowpackAdvanced", variant); + cfg.getValue("PREF_FLOW", "SnowpackAdvanced", enable_pref_flow); + cfg.getValue("ICE_RESERVOIR", "SnowpackAdvanced", enable_ice_reservoir); + cfg.getValue("READ_DSM", "SnowpackAdvanced", read_dsm); + + //Check for use of Richards Equation + useRichardsEq = false; + std::string tmp_useRichardsEq; + cfg.getValue("WATERTRANSPORTMODEL_SNOW", "SnowpackAdvanced", tmp_useRichardsEq); + if (tmp_useRichardsEq=="RICHARDSEQUATION") useRichardsEq = true; + cfg.getValue("WATERTRANSPORTMODEL_SOIL", "SnowpackAdvanced", tmp_useRichardsEq); + if (tmp_useRichardsEq=="RICHARDSEQUATION") useRichardsEq = true; cfg.getValue("EXPERIMENT", "Output", experiment); - cfg.getValue("METEOPATH", "Output", outpath); - cfg.getValue("SNOWPATH", "Output", snowpath); - o_snowpath = (!snowpath.empty())? snowpath : outpath; + cfg.getValue("METEOPATH", "Output", outpath, IOUtils::nothrow); + cfg.getValue("SNOWPATH", "Output", o_snowpath, IOUtils::nothrow); + if (o_snowpath.empty()) o_snowpath = outpath; - cfg.getValue("METEOPATH", "Input", inpath); - snowpath = string(); - cfg.getValue("SNOWPATH", "Input", snowpath); - i_snowpath = (!snowpath.empty())? 
snowpath : inpath; + cfg.getValue("METEOPATH", "Input", inpath, IOUtils::nothrow); + cfg.getValue("SNOWPATH", "Input", i_snowpath, IOUtils::nothrow); + if (i_snowpath.empty()) i_snowpath = inpath; + cfg.getValue("HAZ_WRITE", "Output", haz_write, IOUtils::nothrow); cfg.getValue("OUT_CANOPY", "Output", out_canopy); cfg.getValue("OUT_HAZ", "Output", out_haz); cfg.getValue("OUT_HEAT", "Output", out_heat); @@ -175,8 +195,21 @@ SmetIO::SmetIO(const SnowpackConfig& cfg, const RunInfo& run_info) cfg.getValue("OUT_STAB", "Output", out_stab); cfg.getValue("OUT_SW", "Output", out_sw); cfg.getValue("OUT_T", "Output", out_t); + cfg.getValue("USEREFERENCELAYER", "Output", useReferenceLayer, IOUtils::nothrow); cfg.getValue("TS_DAYS_BETWEEN", "Output", ts_days_between); cfg.getValue("CALCULATION_STEP_LENGTH", "Snowpack", calculation_step_length); + + bool write_acdd = false; + cfg.getValue("ACDD_WRITE", "Output", write_acdd); + if (write_acdd) { + acdd.setEnabled(true); + acdd.setUserConfig(cfg, "Output", false); //do not allow multi-line keys + if (out_haz) { // HACK To avoid troubles in A3D + mio::Date now; + now.setFromSys(); + acdd.addAttribute("history", now.toString(mio::Date::ISO_Z) + ", " + info.user + "@" + info.hostname + ", Snowpack-" + info.version); + } + } } SmetIO::~SmetIO() @@ -194,13 +227,15 @@ SmetIO& SmetIO::operator=(const SmetIO& source) { fixedPositions = source.fixedPositions; outpath = source.outpath; o_snowpath = source.o_snowpath; - snowpath = source.snowpath; experiment = source.experiment; inpath = source.inpath; i_snowpath = source.i_snowpath; + metamorphism_model = source.metamorphism_model; + variant = source.variant; sw_mode = source.sw_mode; //info = source.info; tsWriters = std::map(); //it will have to be re-allocated for thread safety + acdd = source.acdd; in_dflt_TZ = source.in_dflt_TZ; calculation_step_length = source.calculation_step_length; @@ -211,6 +246,8 @@ SmetIO& SmetIO::operator=(const SmetIO& source) { useSoilLayers = 
source.useSoilLayers; research_mode = source.research_mode; perp_to_slope = source.perp_to_slope; + haz_write = source.haz_write; + useReferenceLayer = source.useReferenceLayer; out_heat = source.out_heat; out_lw = source.out_lw; out_sw = source.out_sw; @@ -222,6 +259,9 @@ SmetIO& SmetIO::operator=(const SmetIO& source) { out_stab = source.out_stab; out_canopy = source.out_canopy; out_soileb = source.out_soileb; + useRichardsEq = source.useRichardsEq; + enable_pref_flow = source.enable_pref_flow; + enable_ice_reservoir = source.enable_ice_reservoir; } return *this; } @@ -250,9 +290,10 @@ bool SmetIO::snowCoverExists(const std::string& i_snowfile, const std::string& / * @param stationID * @param SSdata * @param Zdata + * @param read_salinity */ void SmetIO::readSnowCover(const std::string& i_snowfile, const std::string& stationID, - SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata) + SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata, const bool& read_salinity) { std::string snofilename( getFilenamePrefix(i_snowfile, i_snowpath, false) ); std::string hazfilename(snofilename); @@ -264,7 +305,7 @@ void SmetIO::readSnowCover(const std::string& i_snowfile, const std::string& sta hazfilename.replace(hazfilename.rfind(".sno"), 4, ".haz"); } - const Date sno_date( read_snosmet(snofilename, stationID, SSdata) ); + const Date sno_date( read_snosmet(snofilename, stationID, SSdata, read_salinity) ); if (FileUtils::fileExists(hazfilename)) { const Date haz_date( read_hazsmet(hazfilename, Zdata) ); if (haz_date != sno_date) @@ -315,7 +356,7 @@ mio::Date SmetIO::read_hazsmet(const std::string& hazfilename, ZwischenData& Zda } //Read SNO SMET file, parse header and fill SSdata with values from the [DATA] section -mio::Date SmetIO::read_snosmet(const std::string& snofilename, const std::string& stationID, SN_SNOWSOIL_DATA& SSdata) +mio::Date SmetIO::read_snosmet(const std::string& snofilename, const std::string& stationID, SN_SNOWSOIL_DATA& SSdata, const bool& read_salinity) const { 
smet::SMETReader sno_reader(snofilename); Date profile_date( read_snosmet_header(sno_reader, stationID, SSdata) ); @@ -353,10 +394,10 @@ mio::Date SmetIO::read_snosmet(const std::string& snofilename, const std::string ll+1, SSdata.Ldata[ll].depositionDate.toString(Date::ISO).c_str(), SSdata.profileDate.toString(Date::ISO).c_str()); throw IOException("Cannot generate Xdata from file " + sno_reader.get_filename(), AT); } - if (SSdata.Ldata[ll].depositionDate < prev_depositionDate) { + if (SSdata.Ldata[ll].depositionDate < prev_depositionDate && !read_salinity) { // Note: in sea ice it is possible that younger layers are below prn_msg(__FILE__, __LINE__, "err", Date(), "Layer %d is younger (%s) than layer above (%s) !!!", - ll, prev_depositionDate.toString(Date::ISO).c_str(), SSdata.profileDate.toString(Date::ISO).c_str()); + ll, prev_depositionDate.toString(Date::ISO).c_str(), SSdata.Ldata[ll].depositionDate.toString(Date::ISO).c_str()); throw IOException("Cannot generate Xdata from file " + sno_reader.get_filename(), AT); } prev_depositionDate = SSdata.Ldata[ll].depositionDate; @@ -365,7 +406,19 @@ mio::Date SmetIO::read_snosmet(const std::string& snofilename, const std::string SSdata.Ldata[ll].hl = vec_data[current_index++]; SSdata.Ldata[ll].tl = vec_data[current_index++]; SSdata.Ldata[ll].phiIce = vec_data[current_index++]; + if (enable_ice_reservoir) { + SSdata.Ldata[ll].phiIceReservoir = vec_data[current_index++]; + SSdata.Ldata[ll].phiIceReservoirCumul = vec_data[current_index++]; + } else { + SSdata.Ldata[ll].phiIceReservoir = 0.; + SSdata.Ldata[ll].phiIceReservoirCumul = 0.; + } SSdata.Ldata[ll].phiWater = vec_data[current_index++]; + if (enable_pref_flow) { + SSdata.Ldata[ll].phiWaterPref = vec_data[current_index++]; + } else { + SSdata.Ldata[ll].phiWaterPref = 0.; + } SSdata.Ldata[ll].phiVoids = vec_data[current_index++]; SSdata.Ldata[ll].phiSoil = vec_data[current_index++]; @@ -392,6 +445,14 @@ mio::Date SmetIO::read_snosmet(const std::string& snofilename, 
const std::string SSdata.Ldata[ll].CDot = vec_data[current_index++]; SSdata.Ldata[ll].metamo = vec_data[current_index++]; + if ((metamorphism_model == "NIED") && (read_dsm)) { + SSdata.Ldata[ll].dsm = vec_data[current_index++]; + } + if (read_salinity) { + SSdata.Ldata[ll].salinity = vec_data[current_index++]; + SSdata.Ldata[ll].h = vec_data[current_index++]; + } + for (size_t ii=0; ii vec_timestamp; vector vec_data; vector vec_width, vec_precision; - setFormatting(Xdata.number_of_solutes, vec_width, vec_precision); + setFormatting(Xdata.number_of_solutes, vec_width, vec_precision, write_pref_flow, write_ice_reservoir, (Xdata.Seaice!=NULL)); sno_writer.set_width(vec_width); sno_writer.set_precision(vec_precision); @@ -632,7 +732,10 @@ void SmetIO::writeSnoFile(const std::string& snofilename, const mio::Date& date, vec_data.push_back(EMS[e].L); vec_data.push_back(Xdata.Ndata[e+1].T); vec_data.push_back(EMS[e].theta[ICE]); + if (write_ice_reservoir) vec_data.push_back(EMS[e].theta_i_reservoir); + if (write_ice_reservoir) vec_data.push_back(EMS[e].theta_i_reservoir_cumul); vec_data.push_back(EMS[e].theta[WATER]); + if (write_pref_flow) vec_data.push_back(EMS[e].theta[WATER_PREF]); vec_data.push_back(EMS[e].theta[AIR]); vec_data.push_back(EMS[e].theta[SOIL]); vec_data.push_back(EMS[e].soil[SOIL_RHO]); @@ -648,6 +751,13 @@ void SmetIO::writeSnoFile(const std::string& snofilename, const mio::Date& date, vec_data.push_back(1.); vec_data.push_back(EMS[e].CDot); vec_data.push_back(EMS[e].metamo); + if (metamorphism_model == "NIED") { + vec_data.push_back(EMS[e].dsm); + } + if (Xdata.Seaice != NULL) { + vec_data.push_back(EMS[e].salinity); + vec_data.push_back(EMS[e].h); + } for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { vec_data.push_back(EMS[e].conc(ICE,ii)); @@ -657,7 +767,7 @@ void SmetIO::writeSnoFile(const std::string& snofilename, const mio::Date& date, } } - sno_writer.write(vec_timestamp, vec_data); + sno_writer.write(vec_timestamp, vec_data, 
mio::ACDD(false)); } void SmetIO::setBasicHeader(const SnowStation& Xdata, const std::string& fields, smet::SMETWriter& smet_writer) @@ -673,6 +783,8 @@ void SmetIO::setBasicHeader(const SnowStation& Xdata, const std::string& fields, smet_writer.set_header_value("longitude", Xdata.meta.position.getLon()); smet_writer.set_header_value("altitude", Xdata.meta.position.getAltitude()); smet_writer.set_header_value("epsg", Xdata.meta.position.getEPSG()); + smet_writer.set_header_value("slope_angle", Xdata.meta.getSlopeAngle()); + smet_writer.set_header_value("slope_azi", Xdata.meta.getAzimuth()); } void SmetIO::setSnoSmetHeader(const SnowStation& Xdata, const Date& date, smet::SMETWriter& smet_writer) @@ -715,6 +827,22 @@ void SmetIO::setSnoSmetHeader(const SnowStation& Xdata, const Date& date, smet:: smet_writer.set_header_value("CanopyLeafAreaIndex", ss.str()); ss.str(""); ss << fixed << setprecision(2) << Xdata.Cdata.direct_throughfall; smet_writer.set_header_value("CanopyDirectThroughfall", ss.str()); + ss.str(""); ss << fixed << setprecision(2) << Xdata.Cdata.int_cap_snow; + smet_writer.set_header_value("CanopySnowIntCapacity", ss.str()); + ss.str(""); ss << fixed << setprecision(2) << Xdata.Cdata.can_alb_dry; + smet_writer.set_header_value("CanopyAlbedoDry", ss.str()); + ss.str(""); ss << fixed << setprecision(2) << Xdata.Cdata.can_alb_wet; + smet_writer.set_header_value("CanopyAlbedoWet", ss.str()); + ss.str(""); ss << fixed << setprecision(2) << Xdata.Cdata.can_alb_snow; + smet_writer.set_header_value("CanopyAlbedoSnow", ss.str()); + ss.str(""); ss << fixed << setprecision(2) << Xdata.Cdata.can_diameter; + smet_writer.set_header_value("CanopyDiameter", ss.str()); + ss.str(""); ss << fixed << setprecision(2) << Xdata.Cdata.lai_frac_top_default; + smet_writer.set_header_value("CanopyFracLAIUpperLayer", ss.str()); + ss.str(""); ss << fixed << setprecision(2) << Xdata.Cdata.BasalArea; + smet_writer.set_header_value("CanopyBasalArea", ss.str()); + ss.str(""); ss << 
fixed << setprecision(2) << Xdata.SoilEmissivity; + smet_writer.set_header_value("SoilEmissivity", ss.str()); // Additional parameters ss.str(""); ss << fixed << setprecision(2) << Xdata.WindScalingFactor; @@ -725,7 +853,7 @@ void SmetIO::setSnoSmetHeader(const SnowStation& Xdata, const Date& date, smet:: } void SmetIO::setFormatting(const size_t& nr_solutes, - std::vector& vec_width, std::vector& vec_precision) + std::vector& vec_width, std::vector& vec_precision, const bool& write_pref_flow, const bool& write_ice_reservoir, const bool& write_sea_ice) { /* * When writing a SNOW SMET file each written parameter may have a different @@ -741,7 +869,16 @@ void SmetIO::setFormatting(const size_t& nr_solutes, vec_width.push_back(12); vec_precision.push_back(6); //EMS[e].L vec_width.push_back(12); vec_precision.push_back(6); //Xdata.Ndata[e+1].T vec_width.push_back(12); vec_precision.push_back(6); //EMS[e].theta[ICE] + if (write_ice_reservoir) { + vec_width.push_back(12); vec_precision.push_back(6); //EMS[e].theta_i_reservoir + } + if (write_ice_reservoir) { + vec_width.push_back(12); vec_precision.push_back(6); //EMS[e].theta_i_reservoir_cumul + } vec_width.push_back(12); vec_precision.push_back(6); //EMS[e].theta[WATER] + if (write_pref_flow) { + vec_width.push_back(12); vec_precision.push_back(6); //EMS[e].theta[WATER_PREF] + } vec_width.push_back(12); vec_precision.push_back(6); //EMS[e].theta[AIR] vec_width.push_back(12); vec_precision.push_back(6); //EMS[e].theta[SOIL] vec_width.push_back(9); vec_precision.push_back(1); //EMS[e].soil[SOIL_RHO] @@ -758,6 +895,11 @@ void SmetIO::setFormatting(const size_t& nr_solutes, vec_width.push_back(15); vec_precision.push_back(6); //EMS[e].CDot vec_width.push_back(15); vec_precision.push_back(6); //EMS[e].metamo + if (write_sea_ice) { + vec_width.push_back(15); vec_precision.push_back(6); //EMS[e].salinity + vec_width.push_back(16); vec_precision.push_back(6); //EMS[e].h + } + for (size_t ii = 0; ii < nr_solutes; ii++) { 
vec_width.push_back(17); vec_precision.push_back(6); //EMS[e].conc(ICE,ii) vec_width.push_back(18); vec_precision.push_back(7); //EMS[e].conc(WATER,ii) @@ -777,7 +919,7 @@ void SmetIO::setFormatting(const size_t& nr_solutes, * @param Ground Ground level (m) * @param slope_angle (deg) */ -double SmetIO::compPerpPosition(const double& z_vert, const double& hs_ref, const double& ground, const double& cos_sl) +double SmetIO::compPerpPosition(const double& z_vert, const double& hs_ref, const double& ground, const double& cos_sl) const { double pos=0.; if (z_vert == mio::IOUtils::nodata) { @@ -794,51 +936,66 @@ double SmetIO::compPerpPosition(const double& z_vert, const double& hs_ref, cons return pos; } -std::string SmetIO::getFieldsHeader() +std::string SmetIO::getFieldsHeader(const SnowStation& Xdata) const { std::ostringstream os; os << "timestamp "; if (out_heat) - os << "Qs Ql Qg TSG Qg0 Qr" << " "; // 1-2: Turbulent fluxes (W m-2) - // 14-17: Heat flux at lower boundary (W m-2), ground surface temperature (degC), - // Heat flux at gound surface (W m-2), rain energy (W m-2) + os << "Qs Ql Qg TSG Qg0 Qr dIntEnergySnow meltFreezeEnergySnow ColdContentSnow" << " "; //Turbulent fluxes (W m-2) + // Heat flux at lower boundary (W m-2), ground surface temperature (degC), + // Heat flux at gound surface (W m-2), rain energy (W m-2) + // Internal energy change snow (W m-2), Melt freeze part of internal energy change snow (W m-2), Cold content of snow (MJ/kg) if (out_lw) - os << "OLWR ILWR LWR_net" << " "; // 3-5: Longwave radiation fluxes (W m-2) + os << "OLWR ILWR LWR_net" << " "; //Longwave radiation fluxes (W m-2) if (out_sw) - os << "OSWR ISWR Qw pAlbedo mAlbedo ISWR_h ISWR_dir ISWR_diff" << " "; // 6-9: Shortwave radiation fluxes (W m-2) and computed albedo + os << "OSWR ISWR Qw pAlbedo mAlbedo ISWR_h ISWR_dir ISWR_diff" << " "; //Shortwave radiation fluxes (W m-2) and computed albedo if (out_meteo) - os << "TA TSS_mod TSS_meas T_bottom RH VW VW_drift DW MS_Snow HS_mod 
HS_meas" << " "; // 10-13: Air temperature, snow surface temperature (modeled and measured), temperature at bottom of snow/soil pack (degC) + os << "TA TSS_mod TSS_meas T_bottom RH VW VW_drift DW MS_Snow HS_mod HS_meas" << " "; //Air temperature, snow surface temperature (modeled and measured), temperature at bottom of snow/soil pack (degC) if (out_haz) - os << "hoar_size wind_trans24 HN24 HN72_24" << " ";// 30-33: surface hoar size (mm), 24h drift index (cm), height of new snow HN (cm), 3d sum of daily new snow depths (cm) + os << "hoar_size wind_trans24 HN3 HN6 HN12 HN24 HN72_24 PSUM24 ski_pen" << " ";//surface hoar size (mm), 24h drift index (cm), 3 hours height of new snow HN (cm), 6 hours HN, 12 hours HN, 24 hours HN, 3d sum of daily new snow depths (cm), 24 h new snow water equivalent (kg m-2), skier penetration depth (m) if (out_soileb) os << "dIntEnergySoil meltFreezeEnergySoil ColdContentSoil" << " "; - if (out_mass) - os << "SWE MS_Water MS_Wind MS_Rain MS_SN_Runoff MS_Soil_Runoff MS_Sublimation MS_Evap" << " "; // 34-39: SWE, eroded mass, rain rate, runoff at bottom of snowpack, sublimation and evaporation, all in kg m-2 except rain as rate: kg m-2 h-1; see also 52 & 93. LWC (kg m-2); + if (out_mass) { + os << "SWE MS_Water MS_Water_Soil MS_Ice_Soil MS_Wind MS_Rain MS_SN_Runoff MS_Surface_Mass_Flux MS_Soil_Runoff MS_Sublimation MS_Evap" << " "; + //SWE (kg m-2), LWC (kg m-2), LWC (kg m-2), LWC (kg m-2), eroded mass (kg m-2 h-1), rain rate (kg m-2 h-1), runoff at bottom of snowpack (kg m-2), runoff at the soil surface (kg m-2), runoff at bottom of soil (kg m-2), sublimation and evaporation (both in kg m-2); see also 52 & 93. + // Note: in operational mode, runoff at bottom of snowpack is expressed as kg m-2 h-1 when !cumsum_mass. + if (useRichardsEq && Xdata.meta.getSlopeAngle() > 0.) 
{ + os << "Lateral_flow_snow" << " "; + if (useSoilLayers) os << "Lateral_flow_soil" << " "; + } + } if (out_load) - os << "load "; // 50: Solute load at ground surface + os << "load "; //Solute load at ground surface if (out_t && !fixedPositions.empty()) { - // 40-49: Internal Temperature Time Series at fixed heights, modeled and measured, all in degC + //Internal Temperature Time Series at fixed heights, modeled and measured, all in degC for (size_t ii = 0; ii < fixedPositions.size(); ii++) os << "TS" << ii << " "; } if (out_stab) os << "Sclass1 Sclass2 zSd Sd zSn Sn zSs Ss zS4 S4 zS5 S5" << " "; //S5 is liquidWaterIndex - /*if (out_canopy) - os << " ";*/ + + if (out_canopy) { + os << "Interception_storage Canopy_surface_temperature Canopy_albedo Wet_fraction Interception_capacity Net_shortwave_radiation_absorbed_by_canopy" << " "; + os << "Net_longwave_radiation_absorbed_by_canopy Net_radiation_to_canopy Sensible_heat_flux_to_canopy Latent_heat_flux_to_canopy" << " "; + os << "Biomass_heat_storage_flux_towards_Canopy Transpiration_of_the_canopy Evaporation_and_sublimation_of_interception_(liquid_and_frozen)" << " "; + os << "Interception_rate Throughfall Snow_unload Longwave_radiation_up_above_canopy Longwave_radiation_down_above_canopy" << " "; + os << "Shortwave_radiation_up_above_canopy Shortwave_radiation_down_above_canopy Total_land_surface_albedo" << " "; + os << "Total_net_radiation_to_the_surface_(ground_+_canopy) Surface_radiative_temperature_(ground_+_canopy)" << " "; + os << "Forest_floor_albedo Snowfall_rate_Above_Canopy Rainfall_rate_Above_Canopy Evapotranspiration_of_the_total_surface_(ground_+_canopy)" << " "; + } + + if (Xdata.Seaice != NULL) + os << "Total_thickness Ice_thickness Snow_thickness Snow_thickness_wrt_reference Freeboard Sea_level Bulk_salinity Avg_bulk_salinity Avg_brine_salinity Bottom_salinity_flux Top_salinity_flux Total_Flooding_Bucket" << " "; return os.str(); } -void SmetIO::writeTimeSeriesHeader(const SnowStation& Xdata, 
smet::SMETWriter& smet_writer) +void SmetIO::writeTimeSeriesHeader(const SnowStation& Xdata, const double& tz, smet::SMETWriter& smet_writer) const { - const std::string fields( getFieldsHeader() ); + const std::string fields( getFieldsHeader(Xdata) ); setBasicHeader(Xdata, fields, smet_writer); - if (out_haz) { // HACK To avoid troubles in A3D - ostringstream ss; - ss << "Snowpack " << info.version << " run by \"" << info.user << "\""; - smet_writer.set_header_value("creator", ss.str()); - } + smet_writer.set_header_value("tz", tz); std::ostringstream units_offset, units_multiplier; units_offset << "0 "; units_multiplier << "1 "; @@ -847,12 +1004,12 @@ void SmetIO::writeTimeSeriesHeader(const SnowStation& Xdata, smet::SMETWriter& s plot_units << "- "; plot_description << "timestamp "; plot_color << "0x000000 "; plot_min << IOUtils::nodata << " "; plot_max << IOUtils::nodata << " "; if (out_heat) { - //"Qs Ql Qg TSG Qg0 Qr" - plot_description << "sensible_heat latent_heat ground_heat ground_temperature ground_heat_at_soil_interface rain_energy" << " "; - plot_units << "W/m2 W/m2 W/m2 K W/m2 W/m2" << " "; - units_offset << "0 0 0 273.15 0 0" << " "; - units_multiplier << "1 1 1 1 1 1" << " "; - plot_color << "0x669933 0x66CC99 0xCC6600 0xDE22E2 0xFFCC00 0x6600FF" << " "; + //"Qs Ql Qg TSG Qg0 Qr dIntEnergySnow meltFreezeEnergySnow ColdContentSnow" + plot_description << "sensible_heat latent_heat ground_heat ground_temperature ground_heat_at_soil_interface rain_energy snow_internal_energy_change snow_melt_freeze_energy snow_cold_content" << " "; + plot_units << "W/m2 W/m2 W/m2 K W/m2 W/m2 W/m2 W/m2 MJ/m2" << " "; + units_offset << "0 0 0 273.15 0 0 0 0 0" << " "; + units_multiplier << "1 1 1 1 1 1 1 1 1" << " "; + plot_color << "0x669933 0x66CC99 0xCC6600 0xDE22E2 0xFFCC00 0x6600FF 0x663300 0x996666 0xCC9966" << " "; plot_min << "" << " "; plot_max << "" << " "; } @@ -887,19 +1044,19 @@ void SmetIO::writeTimeSeriesHeader(const SnowStation& Xdata, smet::SMETWriter& s 
plot_max << "" << " "; } if (out_haz) { - //"hoar_size wind_trans24 HN24 HN72_24" - plot_description << "hoar_size 24h_wind_drift 24h_height_of_new_snow 3d_sum_of_daily_height_of_new_snow" << " "; - plot_units << "m m m m" << " "; - units_offset << "0 0 0 0" << " "; - units_multiplier << "0.001 0.01 0.01 0.01" << " "; - plot_color << "0x9933FF 0x99FFCC 0x006699 0x33CCCC" << " "; + //"hoar_size wind_trans24 HN3 HN6 HN12 HN24 HN72_24 PSUM24(WC24)" + plot_description << "hoar_size 24h_wind_drift 3h_height_of_new_snow 6h_height_of_new_snow 12h_height_of_new_snow 24h_height_of_new_snow 3d_sum_of_daily_height_of_new_snow 24h_percipitation skier_penetration_depth" << " "; + plot_units << "m m m m m m m m m" << " "; + units_offset << "0 0 0 0 0 0 0 0 0" << " "; + units_multiplier << "0.001 0.01 0.01 0.01 0.01 0.01 0.01 0.001 1" << " "; + plot_color << "0x9933FF 0x99FFCC 0x006699 0x006699 0x006699 0x006699 0x33CCCC 0x006699 0x9933FF" << " "; plot_min << "" << " "; plot_max << "" << " "; } if (out_soileb) { //"dIntEnergySoil meltFreezeEnergySoil ColdContentSoil" plot_description << "soil_internal_energy_change soil_melt_freeze_energy soil_cold_content" << " "; - plot_units << "W/m2 W/m2 J/m2" << " "; + plot_units << "W/m2 W/m2 MJ/m2" << " "; units_offset << "0 0 0" << " "; units_multiplier << "1 1 1" << " "; plot_color << "0x663300 0x996666 0xCC9966" << " "; @@ -907,14 +1064,32 @@ void SmetIO::writeTimeSeriesHeader(const SnowStation& Xdata, smet::SMETWriter& s plot_max << "" << " "; } if (out_mass) { - //"SWE MS_Water MS_Wind MS_Rain MS_SN_Runoff MS_Soil_Runoff MS_Sublimation MS_Evap" - plot_description << "snow_water_equivalent total_amount_of_water erosion_mass_loss rain_rate virtual_lysimeter virtual_lysimeter_under_the_soil sublimation_mass evaporated_mass" << " "; - plot_units << "kg/m2 kg/m2 kg/m2 kg/m2/h kg/m2/h kg/m2/h kg/m2 kg/m2" << " "; - units_offset << "0 0 0 0 0 0 0 0" << " "; - units_multiplier << "1 1 1 1 1 1 1 1" << " "; - plot_color << "0x3300FF 0x0000FF 
0x99CCCC 0x3333 0x0066CC 0x003366 0xCCFFFF 0xCCCCFF" << " "; + //"SWE MS_Water MS_Water_Soil MS_Ice_Soil MS_Wind MS_Rain MS_SN_Runoff MS_Surface_mass_flux MS_Soil_Runoff MS_Sublimation MS_Evap" + plot_description << "snow_water_equivalent total_amount_of_water total_amount_of_water_soil total_amount_of_ice_soil erosion_mass_loss rain_rate virtual_lysimeter_surface_snow_only surface_mass_flux virtual_lysimeter_under_the_soil sublimation_mass evaporated_mass" << " "; + plot_units << "kg/m2 kg/m2 kg/m2 kg/m2 kg/m2/h kg/m2/h kg/m2 kg/m2 kg/m2 kg/m2 kg/m2" << " "; + units_offset << "0 0 0 0 0 0 0 0 0 0 0" << " "; + units_multiplier << "1 1 1 1 1 1 1 1 1 1 1" << " "; + plot_color << "0x3300FF 0x3300FF 0x3300FF 0x3300FF 0x0000FF 0x99CCCC 0x3333 0x0066CC 0x003366 0xCCFFFF 0xCCCCFF" << " "; plot_min << "" << " "; plot_max << "" << " "; + if (useRichardsEq && Xdata.meta.getSlopeAngle() > 0.) { + plot_description << "lateral_flow_snow" << " "; + plot_units << "kg/m2" << " "; + units_offset << "0" << " "; + units_multiplier << "1" << " "; + plot_color << "#A3A3CC" << " "; + plot_min << "" << " "; + plot_max << "" << " "; + if (useSoilLayers) { + plot_description << "lateral_flow_soil" << " "; + plot_units << "kg/m2" << " "; + units_offset << "0" << " "; + units_multiplier << "1" << " "; + plot_color << "#CCA3A3" << " "; + plot_min << "" << " "; + plot_max << "" << " "; + } + } } if (out_load) { //"load" @@ -948,9 +1123,33 @@ void SmetIO::writeTimeSeriesHeader(const SnowStation& Xdata, smet::SMETWriter& s plot_min << "" << " "; plot_max << "" << " "; } - /*if (out_canopy) { - os << " "; - } */ + if (out_canopy) { + plot_description << "Interception_storage Canopy_surface_temperature Canopy_albedo Wet_fraction Interception_capacity Net_shortwave_radiation_absorbed_by_canopy" << " "; + plot_description << "Net_longwave_radiation_absorbed_by_canopy Net_radiation_to_canopy Sensible_heat_flux_to_canopy Latent_heat_flux_to_canopy" << " "; + plot_description << 
"Biomass_heat_storage_flux_towards_Canopy Transpiration_of_the_canopy Evaporation_and_sublimation_of_interception_(liquid_and_frozen)" << " "; + plot_description << "Interception_rate Throughfall Snow_unload Longwave_radiation_up_above_canopy Longwave_radiation_down_above_canopy" << " "; + plot_description << "Shortwave_radiation_up_above_canopy Shortwave_radiation_down_above_canopy Total_land_surface_albedo" << " "; + plot_description << "Total_net_radiation_to_the_surface_(ground_+_canopy) Surface_radiative_temperature_(ground_+_canopy)" << " "; + plot_description << "Forest_floor_albedo Snowfall_rate_Above_Canopy Rainfall_rate_Above_Canopy Evapotranspiration_of_the_total_surface_(ground_+_canopy)" << " "; + plot_units << "kg/m2 degC - - kg/m2 W/m2 W/m2 W/m2 W/m2 W/m2 W/m2 kg/m2/timestep kg/m2/timestep kg/m2/timestep kg/m2/timestep kg/m2/timestep W/m2" << " "; + plot_units << "W/m2 W/m2 W/m2 - W/m2 degC - kg/m2/timestep kg/m2/timestep kg/m2/timestep" << " "; + units_offset << "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" << " "; + units_multiplier << "1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1" << " "; + plot_color << "0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000" << " "; + plot_color << "0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000" << " "; + plot_color << "0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000" << " "; + plot_min << "" << " "; + plot_max << "" << " "; + } + if (Xdata.Seaice != NULL) { + plot_description << "total_thickness ice_thickness snow_thickness snow_thickness_wrt_ref freeboard sea_level tot_salinity avg_bulk_salinity avg_brine_salinity bottom_sal_flux top_sal_flux total_flooding_bucket_scheme" << " "; + plot_units << "m m m m m m g/m2 g/kg g/kg g/m2 g/m2 kg/m2" << " "; + units_offset << "0 0 0 0 0 0 0 0 0 0 0 0" << " "; + units_multiplier << "1 1 1 1 1 1 1 1 1 1 1 1" << " "; + plot_color << "0xFF0000 0xFF0000 0xFF0000 
0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000 0xFF0000" << " "; + plot_min << "" << " "; + plot_max << "" << " "; + } smet_writer.set_header_value("units_offset", units_offset.str()); smet_writer.set_header_value("units_multiplier", units_multiplier.str()); @@ -961,10 +1160,13 @@ void SmetIO::writeTimeSeriesHeader(const SnowStation& Xdata, smet::SMETWriter& s //smet_writer.set_header_value("plot_max", plot_max.str()); } -void SmetIO::writeTimeSeriesData(const SnowStation& Xdata, const SurfaceFluxes& Sdata, const CurrentMeteo& Mdata, const ProcessDat& Hdata, const double &wind_trans24, smet::SMETWriter& smet_writer) +void SmetIO::writeTimeSeriesData(const SnowStation& Xdata, const SurfaceFluxes& Sdata, const CurrentMeteo& Mdata, const ProcessDat& Hdata, const double &wind_trans24, smet::SMETWriter& smet_writer) const { std::vector timestamp( 1, Mdata.date.toString(mio::Date::ISO) ); std::vector data; + std::vector vec_precision, vec_width; //set meaningful precision/width for each column + const int dflt_precision = 3; //default precision + const int dflt_width = 8; //default width const vector& NDS = Xdata.Ndata; const size_t nN = Xdata.getNumberOfNodes(); @@ -972,75 +1174,127 @@ void SmetIO::writeTimeSeriesData(const SnowStation& Xdata, const SurfaceFluxes& //data.push_back( ); if (out_heat) { - data.push_back( Sdata.qs ); - data.push_back( Sdata.ql ); - data.push_back( Sdata.qg ); - data.push_back( IOUtils::K_TO_C(NDS[Xdata.SoilNode].T) ); - data.push_back( Sdata.qg0 ); - data.push_back( Sdata.qr ); + data.push_back( Sdata.qs ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.ql ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.qg ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( IOUtils::K_TO_C(NDS[Xdata.SoilNode].T) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); 
+ data.push_back( Sdata.qg0 ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.qr ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + const size_t nCalcSteps = (!avgsum_time_series) ? (static_cast( ts_days_between / M_TO_D(calculation_step_length) + 0.5 )) : (1); + data.push_back( (Sdata.dIntEnergy * static_cast(nCalcSteps)) / 1000. ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( (Sdata.meltFreezeEnergy * static_cast(nCalcSteps)) / 1000. ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.ColdContent/1e6 ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); } if (out_lw) { - data.push_back( Sdata.lw_out ); - data.push_back( Sdata.lw_in ); - data.push_back( Sdata.lw_net ); + data.push_back( Sdata.lw_out ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.lw_in ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.lw_net ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); } if (out_sw) { - data.push_back( Sdata.sw_out ); - data.push_back( Sdata.sw_in ); - data.push_back( Sdata.qw ); - data.push_back( Sdata.pAlbedo ); - data.push_back( Sdata.mAlbedo ); - data.push_back( Sdata.sw_hor ); - data.push_back( Sdata.sw_dir ); - data.push_back( Sdata.sw_diff ); + data.push_back( Sdata.sw_out ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.sw_in ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.qw ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.pAlbedo ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.mAlbedo ); vec_precision.push_back(dflt_precision); 
vec_width.push_back(dflt_width); + data.push_back( Sdata.sw_hor ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.sw_dir ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.sw_diff ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); } if (out_meteo) { - data.push_back( IOUtils::K_TO_C(Mdata.ta) ); - data.push_back( IOUtils::K_TO_C(NDS[nN-1].T) ); - data.push_back( IOUtils::K_TO_C(Mdata.tss) ); - data.push_back( IOUtils::K_TO_C(NDS[0].T) ); - data.push_back( 100.*Mdata.rh ); - data.push_back( Mdata.vw ); - data.push_back( Mdata.vw_drift ); - data.push_back( Mdata.dw ); - data.push_back( Sdata.mass[SurfaceFluxes::MS_HNW] ); - data.push_back( M_TO_CM((Xdata.cH - Xdata.Ground)/cos_sl) ); + data.push_back( IOUtils::K_TO_C(Mdata.ta) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( IOUtils::K_TO_C(NDS[nN-1].T) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( IOUtils::K_TO_C(Mdata.tss) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( IOUtils::K_TO_C(NDS[0].T) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( 100.*Mdata.rh ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Mdata.vw ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Mdata.vw_drift ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Mdata.dw ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.mass[SurfaceFluxes::MS_HNW] ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + const double ReferenceLevel = (!useReferenceLayer || Xdata.findMarkedReferenceLayer() == IOUtils::nodata) ? (0.) 
: (Xdata.findMarkedReferenceLayer() - Xdata.Ground); + data.push_back( M_TO_CM((Xdata.cH - ReferenceLevel - Xdata.Ground)/cos_sl) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); if (Xdata.mH!=Constants::undefined) data.push_back( M_TO_CM((Xdata.mH - Xdata.Ground)/cos_sl) ); else data.push_back( IOUtils::nodata ); + vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); } if (out_haz) { - data.push_back( Hdata.hoar_size ); - data.push_back( wind_trans24 ); - data.push_back( (perp_to_slope? Hdata.hn24/cos_sl : Hdata.hn24) ); - data.push_back( (perp_to_slope? Hdata.hn72_24/cos_sl : Hdata.hn72_24) ); + data.push_back( Hdata.hoar_size ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( wind_trans24 ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( (perp_to_slope? Hdata.hn3/cos_sl : Hdata.hn3) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( (perp_to_slope? Hdata.hn6/cos_sl : Hdata.hn6) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( (perp_to_slope? Hdata.hn12/cos_sl : Hdata.hn12) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( (perp_to_slope? Hdata.hn24/cos_sl : Hdata.hn24) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( (perp_to_slope? Hdata.hn72_24/cos_sl : Hdata.hn72_24) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( (perp_to_slope? 
Hdata.psum24/cos_sl : Hdata.psum24) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + const double penetrationDepth = StabilityAlgorithms::compPenetrationDepth(Xdata); + data.push_back( penetrationDepth ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + } if (out_soileb) { - const size_t nCalcSteps = static_cast( ts_days_between / M_TO_D(calculation_step_length) + 0.5 ); - data.push_back( (Sdata.dIntEnergySoil * static_cast(nCalcSteps)) / 1000. ); - data.push_back( (Sdata.meltFreezeEnergySoil * static_cast(nCalcSteps)) / 1000. ); - data.push_back( Xdata.ColdContentSoil/1e6 ); + const size_t nCalcSteps = (!avgsum_time_series) ? (static_cast( ts_days_between / M_TO_D(calculation_step_length) + 0.5 )) : (1); + data.push_back( (Sdata.dIntEnergySoil * static_cast(nCalcSteps)) / 1000. ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( (Sdata.meltFreezeEnergySoil * static_cast(nCalcSteps)) / 1000. 
); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.ColdContentSoil/1e6 ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); } if (out_mass) { data.push_back( Sdata.mass[SurfaceFluxes::MS_SWE]/cos_sl ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); data.push_back( Sdata.mass[SurfaceFluxes::MS_WATER]/cos_sl ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); + data.push_back( Sdata.mass[SurfaceFluxes::MS_WATER_SOIL]/cos_sl ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); + data.push_back( Sdata.mass[SurfaceFluxes::MS_ICE_SOIL]/cos_sl ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); data.push_back( Sdata.mass[SurfaceFluxes::MS_WIND]/cos_sl ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); data.push_back( Sdata.mass[SurfaceFluxes::MS_RAIN] ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); data.push_back( Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF]/cos_sl ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); + data.push_back( Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX]/cos_sl ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); data.push_back( (useSoilLayers? Sdata.mass[SurfaceFluxes::MS_SOIL_RUNOFF] / Xdata.cos_sl : IOUtils::nodata) ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); data.push_back( Sdata.mass[SurfaceFluxes::MS_SUBLIMATION]/cos_sl ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); data.push_back( Sdata.mass[SurfaceFluxes::MS_EVAPORATION]/cos_sl ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); + if (useRichardsEq && Xdata.meta.getSlopeAngle() > 0.) 
{ + data.push_back( Xdata.getTotalLateralFlowSnow() ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); + if (useSoilLayers) { + data.push_back( Xdata.getTotalLateralFlowSoil() ); + vec_precision.push_back(dflt_precision); + vec_width.push_back(dflt_width); + } + } } if (out_load) { - data.push_back( Sdata.load[0] ); + if (!Sdata.load.empty()) + data.push_back( Sdata.load[0] ); + else + data.push_back( IOUtils::nodata ); + vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); } if (out_t && !fixedPositions.empty()) { @@ -1052,27 +1306,86 @@ void SmetIO::writeTimeSeriesData(const SnowStation& Xdata, const SurfaceFluxes& data.push_back( IOUtils::K_TO_C(T) ); else data.push_back( Constants::undefined ); + vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); } } if (out_stab) { - data.push_back( Xdata.S_class1 ); - data.push_back( Xdata.S_class2 ); - data.push_back( M_TO_CM(Xdata.z_S_d/cos_sl) ); - data.push_back( Xdata.S_d ); - data.push_back( M_TO_CM(Xdata.z_S_n/cos_sl) ); - data.push_back( Xdata.S_n ); - data.push_back( M_TO_CM(Xdata.z_S_s/cos_sl) ); - data.push_back( Xdata.S_s ); - data.push_back( M_TO_CM(Xdata.z_S_4/cos_sl) ); - data.push_back( Xdata.S_4 ); - data.push_back( M_TO_CM(Xdata.z_S_5/cos_sl) ); - data.push_back( Xdata.getLiquidWaterIndex() ); //Xdata.S_5 HACK + data.push_back( Xdata.S_class1 ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.S_class2 ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( M_TO_CM(Xdata.z_S_d/cos_sl) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.S_d ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( M_TO_CM(Xdata.z_S_n/cos_sl) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.S_n ); 
vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( M_TO_CM(Xdata.z_S_s/cos_sl) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.S_s ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( M_TO_CM(Xdata.z_S_4/cos_sl) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.S_4 ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( M_TO_CM(Xdata.z_S_5/cos_sl) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.getLiquidWaterIndex() ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); //Xdata.S_5 HACK + } + + if (out_canopy) { + // PRIMARY "STATE" VARIABLES + data.push_back( Xdata.Cdata.storage/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // intercepted water (mm or kg m-2) + data.push_back( IOUtils::K_TO_C( Xdata.Cdata.temp) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // temperature (degC) + + // SECONDARY "STATE" VARIABLES + data.push_back( Xdata.Cdata.canopyalb ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // albedo (1) + data.push_back( Xdata.Cdata.wetfraction ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // wet fraction + data.push_back( Xdata.Cdata.intcapacity/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // interception capacity (kg m-2) + + // RADIATIVE FLUXES (W m-2) + data.push_back( Xdata.Cdata.rsnet ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // net shortwave radiation to canopy + data.push_back( Xdata.Cdata.rlnet ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // net longwave radiation to canopy + data.push_back( Xdata.Cdata.rsnet+ 
Xdata.Cdata.rlnet ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // net radiation to canopy + + // HEAT FLUXES CANOPY (W m-2) + data.push_back( - Xdata.Cdata.sensible ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // sensible heat flux to canopy (>0 towards canopy) + data.push_back( - Xdata.Cdata.latentcorr ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // latent heat flux to canopy (>0 towards canopy) + data.push_back( Xdata.Cdata.CondFluxCanop ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // biomass heat storage flux towards Canopy + + // WATER FLUXES CANOPY (kg m-2) + data.push_back( Xdata.Cdata.transp/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // transpiration + data.push_back( Xdata.Cdata.intevap/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // interception evaporation + data.push_back( Xdata.Cdata.interception/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // interception + data.push_back( Xdata.Cdata.throughfall/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // throughfall + data.push_back( Xdata.Cdata.snowunload/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // unload of snow + + // TOTAL SURFACE FLUXES,EVAPORATION; ETC + data.push_back( Xdata.Cdata.rlwrac ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // upward longwave radiation ABOVE canopy + data.push_back( Xdata.Cdata.ilwrac ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // upward longwave radiation ABOVE canopy + data.push_back( Xdata.Cdata.rswrac ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // upward longwave radiation ABOVE canopy + data.push_back( Xdata.Cdata.iswrac ); 
vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // upward longwave radiation ABOVE canopy + data.push_back( Xdata.Cdata.totalalb ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // total albedo [-] + data.push_back( Xdata.Cdata.rlnet+Sdata.lw_net+ Xdata.Cdata.rsnet+Sdata.qw ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // net radiation to the total surface + data.push_back( IOUtils::K_TO_C(pow( Xdata.Cdata.rlwrac/Constants::stefan_boltzmann, 0.25)) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // surface (ground + canopy) temperature + data.push_back( Xdata.Cdata.forestfloor_alb ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // albedo of the forest floor [-] + data.push_back( Xdata.Cdata.snowfac/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // snowfall rate above canopy (mm per output timestep) + data.push_back( Xdata.Cdata.rainfac/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); // rainfall rate above canopy (mm per output timestep) + data.push_back( (Xdata.Cdata.transp+ Xdata.Cdata.intevap-(Sdata.mass[SurfaceFluxes::MS_SUBLIMATION]+Sdata.mass[SurfaceFluxes::MS_EVAPORATION]))/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); //evapotranspiration of total surface (mm h-1) +} + + if (Xdata.Seaice != NULL) { + data.push_back( Xdata.cH - Xdata.Ground ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.Ndata[Xdata.Seaice->IceSurfaceNode].z - Xdata.Ground ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.Ndata[Xdata.getNumberOfNodes()-1].z - Xdata.Ndata[Xdata.Seaice->IceSurfaceNode].z ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + // Check reference level: either a marked reference level, or, 
if non existent, the sea level (if sea ice module is used), otherwise 0: + const double ReferenceLevel = ( Xdata.findMarkedReferenceLayer()==IOUtils::nodata || !useReferenceLayer ) ? ( (Xdata.Seaice==NULL)?(0.):(Xdata.Seaice->SeaLevel) ) : (Xdata.findMarkedReferenceLayer() - Xdata.Ground); + data.push_back( Xdata.Ndata[Xdata.getNumberOfNodes()-1].z - ReferenceLevel ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.Seaice->FreeBoard ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.Seaice->SeaLevel ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.Seaice->getTotSalinity(Xdata) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.Seaice->getAvgBulkSalinity(Xdata) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.Seaice->getAvgBrineSalinity(Xdata) ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.Seaice->BottomSalFlux ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Xdata.Seaice->TopSalFlux ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); + data.push_back( Sdata.mass[SurfaceFluxes::MS_FLOODING]/cos_sl ); vec_precision.push_back(dflt_precision); vec_width.push_back(dflt_width); } - /*if (out_canopy) - os << " ";*/ - smet_writer.write(timestamp, data); + smet_writer.set_precision(vec_precision); + smet_writer.set_width(vec_width); + smet_writer.write(timestamp, data, acdd); } void SmetIO::writeTimeSeries(const SnowStation& Xdata, const SurfaceFluxes& Sdata, const CurrentMeteo& Mdata, @@ -1089,10 +1402,10 @@ void SmetIO::writeTimeSeries(const SnowStation& Xdata, const SurfaceFluxes& Sdat throw InvalidNameException(filename, AT); if (FileUtils::fileExists(filename)) { - tsWriters[filename] = new 
smet::SMETWriter(filename, getFieldsHeader(), IOUtils::nodata); //set to append mode + tsWriters[filename] = new smet::SMETWriter(filename, getFieldsHeader(Xdata), IOUtils::nodata); //set to append mode } else { tsWriters[filename] = new smet::SMETWriter(filename); - writeTimeSeriesHeader(Xdata, *tsWriters[filename]); + writeTimeSeriesHeader(Xdata, Mdata.date.getTimeZone(), *tsWriters[filename]); } } diff --git a/third_party/snowpack/plugins/SmetIO.h b/third_party/snowpack/plugins/SmetIO.h index c8978211..8e417f2b 100644 --- a/third_party/snowpack/plugins/SmetIO.h +++ b/third_party/snowpack/plugins/SmetIO.h @@ -21,10 +21,10 @@ #ifndef SMET_IO_H #define SMET_IO_H +#include #include "../Constants.h" -#include "../Hazard.h" #include "SnowpackIOInterface.h" -#include +#include "../Hazard.h" #include @@ -33,15 +33,15 @@ class SmetIO : public SnowpackIOInterface { public: SmetIO(const SnowpackConfig& i_cfg, const RunInfo& run_info); SmetIO(const SmetIO&); - + ~SmetIO(); - + SmetIO& operator=(const SmetIO&); ///& vec_width, std::vector& vec_precision); - void writeSnoFile(const std::string& snofilename, const mio::Date& date, const SnowStation& Xdata, - const ZwischenData& Zdata) const; - bool keyExists(const smet::SMETReader& reader, const std::string& key) const; - double get_doubleval(const smet::SMETReader& reader, const std::string& keyname) const; - int get_intval(const smet::SMETReader& reader, const std::string& keyname) const; - mio::Date read_snosmet(const std::string& snofilename, const std::string& stationID, SN_SNOWSOIL_DATA& SSdata); - mio::Date read_snosmet_header(const smet::SMETReader& sno_reader, const std::string& stationID, - SN_SNOWSOIL_DATA& SSdata); - double compPerpPosition(const double& z_vert, const double& hs_ref, - const double& ground, const double& cos_sl); - std::string getFieldsHeader(); - void writeTimeSeriesHeader(const SnowStation& Xdata, smet::SMETWriter& smet_writer); - void writeTimeSeriesData(const SnowStation& Xdata, const 
SurfaceFluxes& Sdata, const CurrentMeteo& Mdata, const ProcessDat& Hdata, const double &wind_trans24, smet::SMETWriter& smet_writer); + std::vector& vec_width, std::vector& vec_precision, const bool& write_pref_flow, const bool& write_ice_reservoir, const bool& write_sea_ice); + + static bool keyExists(const smet::SMETReader& reader, const std::string& key); + static double get_doubleval(const smet::SMETReader& reader, const std::string& keyname); + static double get_doubleval_no_error(const smet::SMETReader& reader, const std::string& key); + static int get_intval(const smet::SMETReader& reader, const std::string& keyname); private: std::vector fixedPositions; - std::string outpath, o_snowpath, snowpath, experiment, inpath, i_snowpath, sw_mode; + std::string outpath, o_snowpath, experiment, inpath, i_snowpath; + std::string metamorphism_model, variant, sw_mode; const RunInfo info; std::map tsWriters; ///< for each filename, we keep an associated SMETWriter + mio::ACDD acdd; double in_dflt_TZ; double calculation_step_length, ts_days_between; double min_depth_subsurf; - bool avgsum_time_series, useCanopyModel, useSoilLayers, research_mode, perp_to_slope; + bool avgsum_time_series, useCanopyModel, useSoilLayers, research_mode, perp_to_slope, haz_write; + bool useReferenceLayer; //Whether or not the output should be referenced to the marked reference layer (i.e., the layer with int(mk/1000)==9). 
bool out_heat, out_lw, out_sw, out_meteo, out_haz, out_mass, out_t, out_load, out_stab, out_canopy, out_soileb; + bool useRichardsEq; + bool enable_pref_flow; + bool enable_ice_reservoir; + bool read_dsm; }; #endif diff --git a/third_party/snowpack/plugins/SnowpackIO.cmake.cc b/third_party/snowpack/plugins/SnowpackIO.cmake.cc index 1fbc1354..3efe4755 100644 --- a/third_party/snowpack/plugins/SnowpackIO.cmake.cc +++ b/third_party/snowpack/plugins/SnowpackIO.cmake.cc @@ -20,8 +20,9 @@ #include "SnowpackIO.h" -#include "AsciiIO.h" #include "SmetIO.h" +#include "AsciiIO.h" +#include #cmakedefine PLUGIN_IMISIO #cmakedefine PLUGIN_CAAMLIO @@ -45,8 +46,16 @@ SnowpackIO::SnowpackIO(const SnowpackConfig& cfg): output_ts_as_ascii(false), output_ts_as_smet(false), output_haz_as_imis(false) { + //enforce UTF8 output globally (ie also for cout, cerr) + std::locale utf8; + try { + utf8.global(std::locale(std::locale(), std::locale("C.UTF-8"), std::locale::ctype)); + } catch (std::runtime_error&) { + //std::cerr << "[W] Could not force the output to be UTF8, some special characters might not be shown properly in output files.\n"; + } + //Format of initial snow profile: - const string in_snow = cfg.get("SNOW", "Input"); + const std::string in_snow = cfg.get("SNOW", "Input", "SMET"); if (in_snow == "SNOOLD") { input_snow_as_ascii = true; } else if (in_snow == "CAAML") { @@ -57,23 +66,28 @@ SnowpackIO::SnowpackIO(const SnowpackConfig& cfg): throw InvalidArgumentException("Invalid input snow profile format '"+in_snow+"'. 
Please choose from SMET, CAAML, SNOOLD", AT); //Format of transitional and final snow profile(s): - const string out_snow = cfg.get("SNOW", "Output"); - if (out_snow == "SNOOLD") { - output_snow_as_ascii = true; - vecExtension.push_back("snoold"); //Snow-cover profile file (I/O) - } else if (out_snow == "CAAML") { - output_snow_as_caaml = true; - vecExtension.push_back("haz"); //Snow-cover profile file (I/O) - vecExtension.push_back("caaml"); //Snow-cover profile file (I/O & SnopViz) - vecExtension.push_back("acaaml"); //Aggregated snow-cover profile file (I/O & SnopViz) - } else if (out_snow == "SMET") { - output_snow_as_smet = true; - vecExtension.push_back("haz"); //Snow-cover profile file (I/O) - vecExtension.push_back("sno"); //Snow-cover profile file (I/O) - } else - throw InvalidArgumentException("Invalid output snow profile format '"+out_snow+"'. Please choose from SMET, CAAML, SNOOLD", AT); + const bool snow_out = cfg.get("SNOW_WRITE", "Output"); + const bool haz_out = cfg.get("HAZ_WRITE", "Output"); + const bool a3d_pts = cfg.get("ALPINE3D_PTS", "SnowpackAdvanced"); + if (snow_out || a3d_pts) { + const string out_snow = cfg.get("SNOW", "Output"); + if (out_snow == "SNOOLD") { + output_snow_as_ascii = true; + vecExtension.push_back("snoold"); //Snow-cover profile file (I/O) + } else if (out_snow == "CAAML") { + output_snow_as_caaml = true; + if (haz_out) vecExtension.push_back("haz"); //Snow-cover profile file (I/O) + vecExtension.push_back("caaml"); //Snow-cover profile file (I/O & SnopViz) + vecExtension.push_back("acaaml"); //Aggregated snow-cover profile file (I/O & SnopViz) + } else if (out_snow == "SMET") { + output_snow_as_smet = true; + if (haz_out) vecExtension.push_back("haz"); //Snow-cover profile file (I/O) + vecExtension.push_back("sno"); //Snow-cover profile file (I/O) + } else + throw InvalidArgumentException("Invalid output snow profile format '"+out_snow+"'. 
Please choose from SMET, CAAML, SNOOLD", AT); + } - std::vector vecProfileFmt = cfg.get("PROF_FORMAT", "Output"); + const std::vector vecProfileFmt = cfg.get("PROF_FORMAT", "Output"); if (vecProfileFmt.size() > 3) { throw InvalidArgumentException("The key PROF_FORMAT in [Output] can take three values at most", AT); } else { @@ -97,7 +111,7 @@ SnowpackIO::SnowpackIO(const SnowpackConfig& cfg): //Format of meteo time series: const bool ts_out = cfg.get("TS_WRITE", "Output"); if (ts_out==true) { - const std::string ts_format = cfg.get("TS_FORMAT", "Output"); + const std::string ts_format = cfg.get("TS_FORMAT", "Output", "SMET"); if (ts_format=="SMET") { output_ts_as_smet = true; vecExtension.push_back("smet"); //Classical time series (meteo, snow temperatures, etc.) @@ -107,7 +121,7 @@ SnowpackIO::SnowpackIO(const SnowpackConfig& cfg): } else throw InvalidArgumentException("The key TS_FORMAT in [Output] takes only SMET or MET as value", AT); } - + vecExtension.push_back("ini"); //Record of run configuration //set the "plugins" pointers @@ -138,6 +152,7 @@ SnowpackIO::~SnowpackIO() if (asciiio != NULL) delete asciiio; if (caamlio != NULL) delete caamlio; if (imisdbio != NULL) delete imisdbio; + locale::global(locale("C")); //To suppress "still reachable" reports by valgrind, set locale back to default } std::vector SnowpackIO::getExtensions() @@ -159,16 +174,16 @@ bool SnowpackIO::snowCoverExists(const std::string& i_snowfile, const std::strin } void SnowpackIO::readSnowCover(const std::string& i_snowfile, const std::string& stationID, - SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata) + SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata, const bool& read_salinity) { if (input_snow_as_ascii) { - asciiio->readSnowCover(i_snowfile, stationID, SSdata, Zdata); + asciiio->readSnowCover(i_snowfile, stationID, SSdata, Zdata, read_salinity); #ifdef PLUGIN_CAAMLIO } else if (input_snow_as_caaml) { - caamlio->readSnowCover(i_snowfile, stationID, SSdata, Zdata); + 
caamlio->readSnowCover(i_snowfile, stationID, SSdata, Zdata, read_salinity); #endif } else { - smetio->readSnowCover(i_snowfile, stationID, SSdata, Zdata); + smetio->readSnowCover(i_snowfile, stationID, SSdata, Zdata, read_salinity); } } diff --git a/third_party/snowpack/plugins/SnowpackIO.h b/third_party/snowpack/plugins/SnowpackIO.h index 034620a4..2e512068 100644 --- a/third_party/snowpack/plugins/SnowpackIO.h +++ b/third_party/snowpack/plugins/SnowpackIO.h @@ -46,9 +46,12 @@ * -# the intial state of the various soil and snow layers * * Very often, 1) and 2) are provided together. But this depends ultimately on the file format that is used ot provide such data (SMET, INP, etc). These two points are - * handled by MeteoIO, so please check its documentation (for the last official release, it is available - * online), in the "Available plugins and usage" section for the relevant formats. - * It is recommended to prepare the data in the SMET file format for its ease of use. + * handled by MeteoIO, so please check its documentation (for the last official release, it is available + * online), in the "Available plugins and usage" section for the relevant formats. + * It is recommended to prepare the data in the SMET file format for its ease of use. + * In this case, you should also consider providing 1) as SMET for a simulation starting without snow on the ground (have a look at the files provided + * in the examples) or as CAAML for a simulation starting with snow on the ground (have a look at niViz to create a + * CAAML snow profile). * * Please also check the \ref requirements "Data requirements" page. * @@ -60,7 +63,7 @@ * - a time serie of the meteorological data and fluxes as used in the model. * * Depending on the chosen output format, 1) and 2) might be provided as one file or two files. 
Moreover, since %Snowpack pre-processes all the - * meteorological input data with MeteoIO, the forcing data that is seen in the core of the model might be different than + * meteorological input data with MeteoIO, the forcing data that is seen in the core of the model might be different than * the provided input data. In order to better fine tune the parameters of this pre-processing, it is possible to request a copy of the * pre-processed meteorological data by setting the key WRITE_PROCESSED_METEO to TRUE in the [Output] section. * @@ -71,7 +74,7 @@ *
FieldDescription
rggrain radius [mm]
* * - * + * * *
KeyDescriptionExtra requirements
\subpage smet "SMET"SMET based profile (including the hazard data), recommended
\subpage caaml "CAAML"CAAML profilelibxml
\subpage caaml "CAAML"CAAML profile
\subpage snoold_format "SNOOLD"legacy %Snowpack profile (including the hazard data)
* @@ -86,10 +89,26 @@ * \subpage prf_format "PRF"tabular profile time series * \subpage profile_imis "IMIS"write profile time series to the IMIS databaseOracle's OCCI library * + * When the snow grain shapes are provided as Swiss Code, it means the following: the code is made of three decimal numbers, noted as F1F2F3. Here F1 + * represents the primary grain shape and F2 the secondary grain shape. The grain shapes can be any of the following: + *
+ * + * + * + * + * + * + *
CodeGrain shapeCodeGrain shape
1Precipitation particles (PP)6Surface hoar (SH)
2Decomposing fragmented PP (DF)7Melt forms (MF)
3Rounded grains (RG)8Ice formations (IF)
4Faceted crystals (FC)9Rounding faceted particles (FCxr)
5Depth hoar (DH)
+ * + * In case of Melt-freeze crust (MFcr), it is marked by using the \em 772 code. * * @section available_met_ts Fluxes time series * %Snowpack computes various meteorological parameters as well as fluxes and can write them out as time series. - * Currently, only the \subpage met_format "MET format" is supported. + *
+ * + * + * + *
KeyDescriptionExtra requirements
\subpage met_format "MET"legacy %Snowpack time series for visualization with sngui
\subpage smet "SMET"smet formatted time series for visualization with SnopViz
* */ class SnowpackIO : public SnowpackIOInterface { @@ -102,7 +121,7 @@ class SnowpackIO : public SnowpackIOInterface { virtual bool snowCoverExists(const std::string& i_snowfile, const std::string& stationID) const; virtual void readSnowCover(const std::string& i_snowfile, const std::string& stationID, - SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata); + SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata, const bool& read_salinity); virtual void writeSnowCover(const mio::Date& date, const SnowStation& Xdata, const ZwischenData& Zdata, const bool& forbackup=false); diff --git a/third_party/snowpack/plugins/SnowpackIOInterface.h b/third_party/snowpack/plugins/SnowpackIOInterface.h index bcd49cd9..e7e62191 100644 --- a/third_party/snowpack/plugins/SnowpackIOInterface.h +++ b/third_party/snowpack/plugins/SnowpackIOInterface.h @@ -32,7 +32,7 @@ class SnowpackIOInterface { virtual bool snowCoverExists(const std::string& i_snowfile, const std::string& stationID) const = 0; virtual void readSnowCover(const std::string& i_snowfile, const std::string& stationID, - SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata) = 0; + SN_SNOWSOIL_DATA& SSdata, ZwischenData& Zdata, const bool& read_salinity) = 0; virtual void writeSnowCover(const mio::Date& date, const SnowStation& Xdata, const ZwischenData& Zdata, const bool& forbackup=false) = 0; diff --git a/third_party/snowpack/plugins/pugixml/pugiconfig.hpp b/third_party/snowpack/plugins/pugixml/pugiconfig.hpp new file mode 100644 index 00000000..0713b0ef --- /dev/null +++ b/third_party/snowpack/plugins/pugixml/pugiconfig.hpp @@ -0,0 +1,77 @@ +/** + * pugixml parser - version 1.12 + * -------------------------------------------------------- + * Copyright (C) 2006-2022, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com) + * Report bugs and download new versions at https://pugixml.org/ + * + * This library is distributed under the MIT License. See notice at the end + * of this file. 
+ * + * This work is based on the pugxml parser, which is: + * Copyright (C) 2003, by Kristen Wegner (kristen@tima.net) + */ + +#ifndef HEADER_PUGICONFIG_HPP +#define HEADER_PUGICONFIG_HPP + +// Uncomment this to enable wchar_t mode +// #define PUGIXML_WCHAR_MODE + +// Uncomment this to enable compact mode +// #define PUGIXML_COMPACT + +// Uncomment this to disable XPath +// #define PUGIXML_NO_XPATH + +// Uncomment this to disable STL +// #define PUGIXML_NO_STL + +// Uncomment this to disable exceptions +// #define PUGIXML_NO_EXCEPTIONS + +// Set this to control attributes for public classes/functions, i.e.: +// #define PUGIXML_API __declspec(dllexport) // to export all public symbols from DLL +// #define PUGIXML_CLASS __declspec(dllimport) // to import all classes from DLL +// #define PUGIXML_FUNCTION __fastcall // to set calling conventions to all public functions to fastcall +// In absence of PUGIXML_CLASS/PUGIXML_FUNCTION definitions PUGIXML_API is used instead + +// Tune these constants to adjust memory-related behavior +// #define PUGIXML_MEMORY_PAGE_SIZE 32768 +// #define PUGIXML_MEMORY_OUTPUT_STACK 10240 +// #define PUGIXML_MEMORY_XPATH_PAGE_SIZE 4096 + +// Tune this constant to adjust max nesting for XPath queries +// #define PUGIXML_XPATH_DEPTH_LIMIT 1024 + +// Uncomment this to switch to header-only version +// #define PUGIXML_HEADER_ONLY + +// Uncomment this to enable long long support +// #define PUGIXML_HAS_LONG_LONG + +#endif + +/** + * Copyright (c) 2006-2022 Arseny Kapoulkine + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The 
above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ diff --git a/third_party/snowpack/plugins/pugixml/pugixml.cpp b/third_party/snowpack/plugins/pugixml/pugixml.cpp new file mode 100644 index 00000000..60b55da3 --- /dev/null +++ b/third_party/snowpack/plugins/pugixml/pugixml.cpp @@ -0,0 +1,13029 @@ +/** + * pugixml parser - version 1.12 + * -------------------------------------------------------- + * Copyright (C) 2006-2022, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com) + * Report bugs and download new versions at https://pugixml.org/ + * + * This library is distributed under the MIT License. See notice at the end + * of this file. 
+ * + * This work is based on the pugxml parser, which is: + * Copyright (C) 2003, by Kristen Wegner (kristen@tima.net) + */ + +#ifndef SOURCE_PUGIXML_CPP +#define SOURCE_PUGIXML_CPP + +#include "pugixml.hpp" + +#include +#include +#include +#include +#include + +#ifdef PUGIXML_WCHAR_MODE +# include +#endif + +#ifndef PUGIXML_NO_XPATH +# include +# include +#endif + +#ifndef PUGIXML_NO_STL +# include +# include +# include +#endif + +// For placement new +#include + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable: 4127) // conditional expression is constant +# pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +# pragma warning(disable: 4702) // unreachable code +# pragma warning(disable: 4996) // this function or variable may be unsafe +#endif + +#if defined(_MSC_VER) && defined(__c2__) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated" // this function or variable may be unsafe +#endif + +#ifdef __INTEL_COMPILER +# pragma warning(disable: 177) // function was declared but never referenced +# pragma warning(disable: 279) // controlling expression is constant +# pragma warning(disable: 1478 1786) // function was declared "deprecated" +# pragma warning(disable: 1684) // conversion from pointer to same-sized integral type +#endif + +#if defined(__BORLANDC__) && defined(PUGIXML_HEADER_ONLY) +# pragma warn -8080 // symbol is declared but never used; disabling this inside push/pop bracket does not make the warning go away +#endif + +#ifdef __BORLANDC__ +# pragma option push +# pragma warn -8008 // condition is always false +# pragma warn -8066 // unreachable code +#endif + +#ifdef __SNC__ +// Using diag_push/diag_pop does not disable the warnings inside templates due to a compiler bug +# pragma diag_suppress=178 // function was declared but never referenced +# pragma diag_suppress=237 // controlling expression is constant +#endif + +#ifdef __TI_COMPILER_VERSION__ +# pragma diag_suppress 179 
// function was declared but never referenced +#endif + +// Inlining controls +#if defined(_MSC_VER) && _MSC_VER >= 1300 +# define PUGI__NO_INLINE __declspec(noinline) +#elif defined(__GNUC__) +# define PUGI__NO_INLINE __attribute__((noinline)) +#else +# define PUGI__NO_INLINE +#endif + +// Branch weight controls +#if defined(__GNUC__) && !defined(__c2__) +# define PUGI__UNLIKELY(cond) __builtin_expect(cond, 0) +#else +# define PUGI__UNLIKELY(cond) (cond) +#endif + +// Simple static assertion +#define PUGI__STATIC_ASSERT(cond) { static const char condition_failed[(cond) ? 1 : -1] = {0}; (void)condition_failed[0]; } + +// Digital Mars C++ bug workaround for passing char loaded from memory via stack +#ifdef __DMC__ +# define PUGI__DMC_VOLATILE volatile +#else +# define PUGI__DMC_VOLATILE +#endif + +// Integer sanitizer workaround; we only apply this for clang since gcc8 has no_sanitize but not unsigned-integer-overflow and produces "attribute directive ignored" warnings +#if defined(__clang__) && defined(__has_attribute) +# if __has_attribute(no_sanitize) +# define PUGI__UNSIGNED_OVERFLOW __attribute__((no_sanitize("unsigned-integer-overflow"))) +# else +# define PUGI__UNSIGNED_OVERFLOW +# endif +#else +# define PUGI__UNSIGNED_OVERFLOW +#endif + +// Borland C++ bug workaround for not defining ::memcpy depending on header include order (can't always use std::memcpy because some compilers don't have it at all) +#if defined(__BORLANDC__) && !defined(__MEM_H_USING_LIST) +using std::memcpy; +using std::memmove; +using std::memset; +#endif + +// Some MinGW/GCC versions have headers that erroneously omit LLONG_MIN/LLONG_MAX/ULLONG_MAX definitions from limits.h in some configurations +#if defined(PUGIXML_HAS_LONG_LONG) && defined(__GNUC__) && !defined(LLONG_MAX) && !defined(LLONG_MIN) && !defined(ULLONG_MAX) +# define LLONG_MIN (-LLONG_MAX - 1LL) +# define LLONG_MAX __LONG_LONG_MAX__ +# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) +#endif + +// In some environments MSVC is a 
compiler but the CRT lacks certain MSVC-specific features +#if defined(_MSC_VER) && !defined(__S3E__) && !defined(_WIN32_WCE) +# define PUGI__MSVC_CRT_VERSION _MSC_VER +#elif defined(_WIN32_WCE) +# define PUGI__MSVC_CRT_VERSION 1310 // MSVC7.1 +#endif + +// Not all platforms have snprintf; we define a wrapper that uses snprintf if possible. This only works with buffers with a known size. +#if __cplusplus >= 201103 +# define PUGI__SNPRINTF(buf, ...) snprintf(buf, sizeof(buf), __VA_ARGS__) +#elif defined(PUGI__MSVC_CRT_VERSION) && PUGI__MSVC_CRT_VERSION >= 1400 +# define PUGI__SNPRINTF(buf, ...) _snprintf_s(buf, _countof(buf), _TRUNCATE, __VA_ARGS__) +#else +# define PUGI__SNPRINTF sprintf +#endif + +// We put implementation details into an anonymous namespace in source mode, but have to keep it in non-anonymous namespace in header-only mode to prevent binary bloat. +#ifdef PUGIXML_HEADER_ONLY +# define PUGI__NS_BEGIN namespace pugi { namespace impl { +# define PUGI__NS_END } } +# define PUGI__FN inline +# define PUGI__FN_NO_INLINE inline +#else +# if defined(_MSC_VER) && _MSC_VER < 1300 // MSVC6 seems to have an amusing bug with anonymous namespaces inside namespaces +# define PUGI__NS_BEGIN namespace pugi { namespace impl { +# define PUGI__NS_END } } +# else +# define PUGI__NS_BEGIN namespace pugi { namespace impl { namespace { +# define PUGI__NS_END } } } +# endif +# define PUGI__FN +# define PUGI__FN_NO_INLINE PUGI__NO_INLINE +#endif + +// uintptr_t +#if (defined(_MSC_VER) && _MSC_VER < 1600) || (defined(__BORLANDC__) && __BORLANDC__ < 0x561) +namespace pugi +{ +# ifndef _UINTPTR_T_DEFINED + typedef size_t uintptr_t; +# endif + + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +} +#else +# include +#endif + +// Memory allocation +PUGI__NS_BEGIN + PUGI__FN void* default_allocate(size_t size) + { + return malloc(size); + } + + PUGI__FN void default_deallocate(void* ptr) + { + free(ptr); + } + + template + 
struct xml_memory_management_function_storage + { + static allocation_function allocate; + static deallocation_function deallocate; + }; + + // Global allocation functions are stored in class statics so that in header mode linker deduplicates them + // Without a template<> we'll get multiple definitions of the same static + template allocation_function xml_memory_management_function_storage::allocate = default_allocate; + template deallocation_function xml_memory_management_function_storage::deallocate = default_deallocate; + + typedef xml_memory_management_function_storage xml_memory; +PUGI__NS_END + +// String utilities +PUGI__NS_BEGIN + // Get string length + PUGI__FN size_t strlength(const char_t* s) + { + assert(s); + + #ifdef PUGIXML_WCHAR_MODE + return wcslen(s); + #else + return strlen(s); + #endif + } + + // Compare two strings + PUGI__FN bool strequal(const char_t* src, const char_t* dst) + { + assert(src && dst); + + #ifdef PUGIXML_WCHAR_MODE + return wcscmp(src, dst) == 0; + #else + return strcmp(src, dst) == 0; + #endif + } + + // Compare lhs with [rhs_begin, rhs_end) + PUGI__FN bool strequalrange(const char_t* lhs, const char_t* rhs, size_t count) + { + for (size_t i = 0; i < count; ++i) + if (lhs[i] != rhs[i]) + return false; + + return lhs[count] == 0; + } + + // Get length of wide string, even if CRT lacks wide character support + PUGI__FN size_t strlength_wide(const wchar_t* s) + { + assert(s); + + #ifdef PUGIXML_WCHAR_MODE + return wcslen(s); + #else + const wchar_t* end = s; + while (*end) end++; + return static_cast(end - s); + #endif + } +PUGI__NS_END + +// auto_ptr-like object for exception recovery +PUGI__NS_BEGIN + template struct auto_deleter + { + typedef void (*D)(T*); + + T* data; + D deleter; + + auto_deleter(T* data_, D deleter_): data(data_), deleter(deleter_) + { + } + + ~auto_deleter() + { + if (data) deleter(data); + } + + T* release() + { + T* result = data; + data = 0; + return result; + } + }; +PUGI__NS_END + +#ifdef 
PUGIXML_COMPACT +PUGI__NS_BEGIN + class compact_hash_table + { + public: + compact_hash_table(): _items(0), _capacity(0), _count(0) + { + } + + void clear() + { + if (_items) + { + xml_memory::deallocate(_items); + _items = 0; + _capacity = 0; + _count = 0; + } + } + + void* find(const void* key) + { + if (_capacity == 0) return 0; + + item_t* item = get_item(key); + assert(item); + assert(item->key == key || (item->key == 0 && item->value == 0)); + + return item->value; + } + + void insert(const void* key, void* value) + { + assert(_capacity != 0 && _count < _capacity - _capacity / 4); + + item_t* item = get_item(key); + assert(item); + + if (item->key == 0) + { + _count++; + item->key = key; + } + + item->value = value; + } + + bool reserve(size_t extra = 16) + { + if (_count + extra >= _capacity - _capacity / 4) + return rehash(_count + extra); + + return true; + } + + private: + struct item_t + { + const void* key; + void* value; + }; + + item_t* _items; + size_t _capacity; + + size_t _count; + + bool rehash(size_t count); + + item_t* get_item(const void* key) + { + assert(key); + assert(_capacity > 0); + + size_t hashmod = _capacity - 1; + size_t bucket = hash(key) & hashmod; + + for (size_t probe = 0; probe <= hashmod; ++probe) + { + item_t& probe_item = _items[bucket]; + + if (probe_item.key == key || probe_item.key == 0) + return &probe_item; + + // hash collision, quadratic probing + bucket = (bucket + probe + 1) & hashmod; + } + + assert(false && "Hash table is full"); // unreachable + return 0; + } + + static PUGI__UNSIGNED_OVERFLOW unsigned int hash(const void* key) + { + unsigned int h = static_cast(reinterpret_cast(key) & 0xffffffff); + + // MurmurHash3 32-bit finalizer + h ^= h >> 16; + h *= 0x85ebca6bu; + h ^= h >> 13; + h *= 0xc2b2ae35u; + h ^= h >> 16; + + return h; + } + }; + + PUGI__FN_NO_INLINE bool compact_hash_table::rehash(size_t count) + { + size_t capacity = 32; + while (count >= capacity - capacity / 4) + capacity *= 2; + + 
compact_hash_table rt; + rt._capacity = capacity; + rt._items = static_cast(xml_memory::allocate(sizeof(item_t) * capacity)); + + if (!rt._items) + return false; + + memset(rt._items, 0, sizeof(item_t) * capacity); + + for (size_t i = 0; i < _capacity; ++i) + if (_items[i].key) + rt.insert(_items[i].key, _items[i].value); + + if (_items) + xml_memory::deallocate(_items); + + _capacity = capacity; + _items = rt._items; + + assert(_count == rt._count); + + return true; + } + +PUGI__NS_END +#endif + +PUGI__NS_BEGIN +#ifdef PUGIXML_COMPACT + static const uintptr_t xml_memory_block_alignment = 4; +#else + static const uintptr_t xml_memory_block_alignment = sizeof(void*); +#endif + + // extra metadata bits + static const uintptr_t xml_memory_page_contents_shared_mask = 64; + static const uintptr_t xml_memory_page_name_allocated_mask = 32; + static const uintptr_t xml_memory_page_value_allocated_mask = 16; + static const uintptr_t xml_memory_page_type_mask = 15; + + // combined masks for string uniqueness + static const uintptr_t xml_memory_page_name_allocated_or_shared_mask = xml_memory_page_name_allocated_mask | xml_memory_page_contents_shared_mask; + static const uintptr_t xml_memory_page_value_allocated_or_shared_mask = xml_memory_page_value_allocated_mask | xml_memory_page_contents_shared_mask; + +#ifdef PUGIXML_COMPACT + #define PUGI__GETHEADER_IMPL(object, page, flags) // unused + #define PUGI__GETPAGE_IMPL(header) (header).get_page() +#else + #define PUGI__GETHEADER_IMPL(object, page, flags) (((reinterpret_cast(object) - reinterpret_cast(page)) << 8) | (flags)) + // this macro casts pointers through void* to avoid 'cast increases required alignment of target type' warnings + #define PUGI__GETPAGE_IMPL(header) static_cast(const_cast(static_cast(reinterpret_cast(&header) - (header >> 8)))) +#endif + + #define PUGI__GETPAGE(n) PUGI__GETPAGE_IMPL((n)->header) + #define PUGI__NODETYPE(n) static_cast((n)->header & impl::xml_memory_page_type_mask) + + struct 
xml_allocator; + + struct xml_memory_page + { + static xml_memory_page* construct(void* memory) + { + xml_memory_page* result = static_cast(memory); + + result->allocator = 0; + result->prev = 0; + result->next = 0; + result->busy_size = 0; + result->freed_size = 0; + + #ifdef PUGIXML_COMPACT + result->compact_string_base = 0; + result->compact_shared_parent = 0; + result->compact_page_marker = 0; + #endif + + return result; + } + + xml_allocator* allocator; + + xml_memory_page* prev; + xml_memory_page* next; + + size_t busy_size; + size_t freed_size; + + #ifdef PUGIXML_COMPACT + char_t* compact_string_base; + void* compact_shared_parent; + uint32_t* compact_page_marker; + #endif + }; + + static const size_t xml_memory_page_size = + #ifdef PUGIXML_MEMORY_PAGE_SIZE + (PUGIXML_MEMORY_PAGE_SIZE) + #else + 32768 + #endif + - sizeof(xml_memory_page); + + struct xml_memory_string_header + { + uint16_t page_offset; // offset from page->data + uint16_t full_size; // 0 if string occupies whole page + }; + + struct xml_allocator + { + xml_allocator(xml_memory_page* root): _root(root), _busy_size(root->busy_size) + { + #ifdef PUGIXML_COMPACT + _hash = 0; + #endif + } + + xml_memory_page* allocate_page(size_t data_size) + { + size_t size = sizeof(xml_memory_page) + data_size; + + // allocate block with some alignment, leaving memory for worst-case padding + void* memory = xml_memory::allocate(size); + if (!memory) return 0; + + // prepare page structure + xml_memory_page* page = xml_memory_page::construct(memory); + assert(page); + + assert(this == _root->allocator); + page->allocator = this; + + return page; + } + + static void deallocate_page(xml_memory_page* page) + { + xml_memory::deallocate(page); + } + + void* allocate_memory_oob(size_t size, xml_memory_page*& out_page); + + void* allocate_memory(size_t size, xml_memory_page*& out_page) + { + if (PUGI__UNLIKELY(_busy_size + size > xml_memory_page_size)) + return allocate_memory_oob(size, out_page); + + void* buf = 
reinterpret_cast(_root) + sizeof(xml_memory_page) + _busy_size; + + _busy_size += size; + + out_page = _root; + + return buf; + } + + #ifdef PUGIXML_COMPACT + void* allocate_object(size_t size, xml_memory_page*& out_page) + { + void* result = allocate_memory(size + sizeof(uint32_t), out_page); + if (!result) return 0; + + // adjust for marker + ptrdiff_t offset = static_cast(result) - reinterpret_cast(out_page->compact_page_marker); + + if (PUGI__UNLIKELY(static_cast(offset) >= 256 * xml_memory_block_alignment)) + { + // insert new marker + uint32_t* marker = static_cast(result); + + *marker = static_cast(reinterpret_cast(marker) - reinterpret_cast(out_page)); + out_page->compact_page_marker = marker; + + // since we don't reuse the page space until we reallocate it, we can just pretend that we freed the marker block + // this will make sure deallocate_memory correctly tracks the size + out_page->freed_size += sizeof(uint32_t); + + return marker + 1; + } + else + { + // roll back uint32_t part + _busy_size -= sizeof(uint32_t); + + return result; + } + } + #else + void* allocate_object(size_t size, xml_memory_page*& out_page) + { + return allocate_memory(size, out_page); + } + #endif + + void deallocate_memory(void* ptr, size_t size, xml_memory_page* page) + { + if (page == _root) page->busy_size = _busy_size; + + assert(ptr >= reinterpret_cast(page) + sizeof(xml_memory_page) && ptr < reinterpret_cast(page) + sizeof(xml_memory_page) + page->busy_size); + (void)!ptr; + + page->freed_size += size; + assert(page->freed_size <= page->busy_size); + + if (page->freed_size == page->busy_size) + { + if (page->next == 0) + { + assert(_root == page); + + // top page freed, just reset sizes + page->busy_size = 0; + page->freed_size = 0; + + #ifdef PUGIXML_COMPACT + // reset compact state to maximize efficiency + page->compact_string_base = 0; + page->compact_shared_parent = 0; + page->compact_page_marker = 0; + #endif + + _busy_size = 0; + } + else + { + assert(_root != page); 
+ assert(page->prev); + + // remove from the list + page->prev->next = page->next; + page->next->prev = page->prev; + + // deallocate + deallocate_page(page); + } + } + } + + char_t* allocate_string(size_t length) + { + static const size_t max_encoded_offset = (1 << 16) * xml_memory_block_alignment; + + PUGI__STATIC_ASSERT(xml_memory_page_size <= max_encoded_offset); + + // allocate memory for string and header block + size_t size = sizeof(xml_memory_string_header) + length * sizeof(char_t); + + // round size up to block alignment boundary + size_t full_size = (size + (xml_memory_block_alignment - 1)) & ~(xml_memory_block_alignment - 1); + + xml_memory_page* page; + xml_memory_string_header* header = static_cast(allocate_memory(full_size, page)); + + if (!header) return 0; + + // setup header + ptrdiff_t page_offset = reinterpret_cast(header) - reinterpret_cast(page) - sizeof(xml_memory_page); + + assert(page_offset % xml_memory_block_alignment == 0); + assert(page_offset >= 0 && static_cast(page_offset) < max_encoded_offset); + header->page_offset = static_cast(static_cast(page_offset) / xml_memory_block_alignment); + + // full_size == 0 for large strings that occupy the whole page + assert(full_size % xml_memory_block_alignment == 0); + assert(full_size < max_encoded_offset || (page->busy_size == full_size && page_offset == 0)); + header->full_size = static_cast(full_size < max_encoded_offset ? 
full_size / xml_memory_block_alignment : 0); + + // round-trip through void* to avoid 'cast increases required alignment of target type' warning + // header is guaranteed a pointer-sized alignment, which should be enough for char_t + return static_cast(static_cast(header + 1)); + } + + void deallocate_string(char_t* string) + { + // this function casts pointers through void* to avoid 'cast increases required alignment of target type' warnings + // we're guaranteed the proper (pointer-sized) alignment on the input string if it was allocated via allocate_string + + // get header + xml_memory_string_header* header = static_cast(static_cast(string)) - 1; + assert(header); + + // deallocate + size_t page_offset = sizeof(xml_memory_page) + header->page_offset * xml_memory_block_alignment; + xml_memory_page* page = reinterpret_cast(static_cast(reinterpret_cast(header) - page_offset)); + + // if full_size == 0 then this string occupies the whole page + size_t full_size = header->full_size == 0 ? page->busy_size : header->full_size * xml_memory_block_alignment; + + deallocate_memory(header, full_size, page); + } + + bool reserve() + { + #ifdef PUGIXML_COMPACT + return _hash->reserve(); + #else + return true; + #endif + } + + xml_memory_page* _root; + size_t _busy_size; + + #ifdef PUGIXML_COMPACT + compact_hash_table* _hash; + #endif + }; + + PUGI__FN_NO_INLINE void* xml_allocator::allocate_memory_oob(size_t size, xml_memory_page*& out_page) + { + const size_t large_allocation_threshold = xml_memory_page_size / 4; + + xml_memory_page* page = allocate_page(size <= large_allocation_threshold ? 
xml_memory_page_size : size); + out_page = page; + + if (!page) return 0; + + if (size <= large_allocation_threshold) + { + _root->busy_size = _busy_size; + + // insert page at the end of linked list + page->prev = _root; + _root->next = page; + _root = page; + + _busy_size = size; + } + else + { + // insert page before the end of linked list, so that it is deleted as soon as possible + // the last page is not deleted even if it's empty (see deallocate_memory) + assert(_root->prev); + + page->prev = _root->prev; + page->next = _root; + + _root->prev->next = page; + _root->prev = page; + + page->busy_size = size; + } + + return reinterpret_cast(page) + sizeof(xml_memory_page); + } +PUGI__NS_END + +#ifdef PUGIXML_COMPACT +PUGI__NS_BEGIN + static const uintptr_t compact_alignment_log2 = 2; + static const uintptr_t compact_alignment = 1 << compact_alignment_log2; + + class compact_header + { + public: + compact_header(xml_memory_page* page, unsigned int flags) + { + PUGI__STATIC_ASSERT(xml_memory_block_alignment == compact_alignment); + + ptrdiff_t offset = (reinterpret_cast(this) - reinterpret_cast(page->compact_page_marker)); + assert(offset % compact_alignment == 0 && static_cast(offset) < 256 * compact_alignment); + + _page = static_cast(offset >> compact_alignment_log2); + _flags = static_cast(flags); + } + + void operator&=(uintptr_t mod) + { + _flags &= static_cast(mod); + } + + void operator|=(uintptr_t mod) + { + _flags |= static_cast(mod); + } + + uintptr_t operator&(uintptr_t mod) const + { + return _flags & mod; + } + + xml_memory_page* get_page() const + { + // round-trip through void* to silence 'cast increases required alignment of target type' warnings + const char* page_marker = reinterpret_cast(this) - (_page << compact_alignment_log2); + const char* page = page_marker - *reinterpret_cast(static_cast(page_marker)); + + return const_cast(reinterpret_cast(static_cast(page))); + } + + private: + unsigned char _page; + unsigned char _flags; + }; + + 
PUGI__FN xml_memory_page* compact_get_page(const void* object, int header_offset) + { + const compact_header* header = reinterpret_cast(static_cast(object) - header_offset); + + return header->get_page(); + } + + template PUGI__FN_NO_INLINE T* compact_get_value(const void* object) + { + return static_cast(compact_get_page(object, header_offset)->allocator->_hash->find(object)); + } + + template PUGI__FN_NO_INLINE void compact_set_value(const void* object, T* value) + { + compact_get_page(object, header_offset)->allocator->_hash->insert(object, value); + } + + template class compact_pointer + { + public: + compact_pointer(): _data(0) + { + } + + void operator=(const compact_pointer& rhs) + { + *this = rhs + 0; + } + + void operator=(T* value) + { + if (value) + { + // value is guaranteed to be compact-aligned; 'this' is not + // our decoding is based on 'this' aligned to compact alignment downwards (see operator T*) + // so for negative offsets (e.g. -3) we need to adjust the diff by compact_alignment - 1 to + // compensate for arithmetic shift rounding for negative values + ptrdiff_t diff = reinterpret_cast(value) - reinterpret_cast(this); + ptrdiff_t offset = ((diff + int(compact_alignment - 1)) >> compact_alignment_log2) - start; + + if (static_cast(offset) <= 253) + _data = static_cast(offset + 1); + else + { + compact_set_value(this, value); + + _data = 255; + } + } + else + _data = 0; + } + + operator T*() const + { + if (_data) + { + if (_data < 255) + { + uintptr_t base = reinterpret_cast(this) & ~(compact_alignment - 1); + + return reinterpret_cast(base + (_data - 1 + start) * compact_alignment); + } + else + return compact_get_value(this); + } + else + return 0; + } + + T* operator->() const + { + return *this; + } + + private: + unsigned char _data; + }; + + template class compact_pointer_parent + { + public: + compact_pointer_parent(): _data(0) + { + } + + void operator=(const compact_pointer_parent& rhs) + { + *this = rhs + 0; + } + + void operator=(T* 
value) + { + if (value) + { + // value is guaranteed to be compact-aligned; 'this' is not + // our decoding is based on 'this' aligned to compact alignment downwards (see operator T*) + // so for negative offsets (e.g. -3) we need to adjust the diff by compact_alignment - 1 to + // compensate for arithmetic shift behavior for negative values + ptrdiff_t diff = reinterpret_cast(value) - reinterpret_cast(this); + ptrdiff_t offset = ((diff + int(compact_alignment - 1)) >> compact_alignment_log2) + 65533; + + if (static_cast(offset) <= 65533) + { + _data = static_cast(offset + 1); + } + else + { + xml_memory_page* page = compact_get_page(this, header_offset); + + if (PUGI__UNLIKELY(page->compact_shared_parent == 0)) + page->compact_shared_parent = value; + + if (page->compact_shared_parent == value) + { + _data = 65534; + } + else + { + compact_set_value(this, value); + + _data = 65535; + } + } + } + else + { + _data = 0; + } + } + + operator T*() const + { + if (_data) + { + if (_data < 65534) + { + uintptr_t base = reinterpret_cast(this) & ~(compact_alignment - 1); + + return reinterpret_cast(base + (_data - 1 - 65533) * compact_alignment); + } + else if (_data == 65534) + return static_cast(compact_get_page(this, header_offset)->compact_shared_parent); + else + return compact_get_value(this); + } + else + return 0; + } + + T* operator->() const + { + return *this; + } + + private: + uint16_t _data; + }; + + template class compact_string + { + public: + compact_string(): _data(0) + { + } + + void operator=(const compact_string& rhs) + { + *this = rhs + 0; + } + + void operator=(char_t* value) + { + if (value) + { + xml_memory_page* page = compact_get_page(this, header_offset); + + if (PUGI__UNLIKELY(page->compact_string_base == 0)) + page->compact_string_base = value; + + ptrdiff_t offset = value - page->compact_string_base; + + if (static_cast(offset) < (65535 << 7)) + { + // round-trip through void* to silence 'cast increases required alignment of target type' 
warnings + uint16_t* base = reinterpret_cast(static_cast(reinterpret_cast(this) - base_offset)); + + if (*base == 0) + { + *base = static_cast((offset >> 7) + 1); + _data = static_cast((offset & 127) + 1); + } + else + { + ptrdiff_t remainder = offset - ((*base - 1) << 7); + + if (static_cast(remainder) <= 253) + { + _data = static_cast(remainder + 1); + } + else + { + compact_set_value(this, value); + + _data = 255; + } + } + } + else + { + compact_set_value(this, value); + + _data = 255; + } + } + else + { + _data = 0; + } + } + + operator char_t*() const + { + if (_data) + { + if (_data < 255) + { + xml_memory_page* page = compact_get_page(this, header_offset); + + // round-trip through void* to silence 'cast increases required alignment of target type' warnings + const uint16_t* base = reinterpret_cast(static_cast(reinterpret_cast(this) - base_offset)); + assert(*base); + + ptrdiff_t offset = ((*base - 1) << 7) + (_data - 1); + + return page->compact_string_base + offset; + } + else + { + return compact_get_value(this); + } + } + else + return 0; + } + + private: + unsigned char _data; + }; +PUGI__NS_END +#endif + +#ifdef PUGIXML_COMPACT +namespace pugi +{ + struct xml_attribute_struct + { + xml_attribute_struct(impl::xml_memory_page* page): header(page, 0), namevalue_base(0) + { + PUGI__STATIC_ASSERT(sizeof(xml_attribute_struct) == 8); + } + + impl::compact_header header; + + uint16_t namevalue_base; + + impl::compact_string<4, 2> name; + impl::compact_string<5, 3> value; + + impl::compact_pointer prev_attribute_c; + impl::compact_pointer next_attribute; + }; + + struct xml_node_struct + { + xml_node_struct(impl::xml_memory_page* page, xml_node_type type): header(page, type), namevalue_base(0) + { + PUGI__STATIC_ASSERT(sizeof(xml_node_struct) == 12); + } + + impl::compact_header header; + + uint16_t namevalue_base; + + impl::compact_string<4, 2> name; + impl::compact_string<5, 3> value; + + impl::compact_pointer_parent parent; + + impl::compact_pointer 
first_child; + + impl::compact_pointer prev_sibling_c; + impl::compact_pointer next_sibling; + + impl::compact_pointer first_attribute; + }; +} +#else +namespace pugi +{ + struct xml_attribute_struct + { + xml_attribute_struct(impl::xml_memory_page* page): name(0), value(0), prev_attribute_c(0), next_attribute(0) + { + header = PUGI__GETHEADER_IMPL(this, page, 0); + } + + uintptr_t header; + + char_t* name; + char_t* value; + + xml_attribute_struct* prev_attribute_c; + xml_attribute_struct* next_attribute; + }; + + struct xml_node_struct + { + xml_node_struct(impl::xml_memory_page* page, xml_node_type type): name(0), value(0), parent(0), first_child(0), prev_sibling_c(0), next_sibling(0), first_attribute(0) + { + header = PUGI__GETHEADER_IMPL(this, page, type); + } + + uintptr_t header; + + char_t* name; + char_t* value; + + xml_node_struct* parent; + + xml_node_struct* first_child; + + xml_node_struct* prev_sibling_c; + xml_node_struct* next_sibling; + + xml_attribute_struct* first_attribute; + }; +} +#endif + +PUGI__NS_BEGIN + struct xml_extra_buffer + { + char_t* buffer; + xml_extra_buffer* next; + }; + + struct xml_document_struct: public xml_node_struct, public xml_allocator + { + xml_document_struct(xml_memory_page* page): xml_node_struct(page, node_document), xml_allocator(page), buffer(0), extra_buffers(0) + { + } + + const char_t* buffer; + + xml_extra_buffer* extra_buffers; + + #ifdef PUGIXML_COMPACT + compact_hash_table hash; + #endif + }; + + template inline xml_allocator& get_allocator(const Object* object) + { + assert(object); + + return *PUGI__GETPAGE(object)->allocator; + } + + template inline xml_document_struct& get_document(const Object* object) + { + assert(object); + + return *static_cast(PUGI__GETPAGE(object)->allocator); + } +PUGI__NS_END + +// Low-level DOM operations +PUGI__NS_BEGIN + inline xml_attribute_struct* allocate_attribute(xml_allocator& alloc) + { + xml_memory_page* page; + void* memory = 
alloc.allocate_object(sizeof(xml_attribute_struct), page); + if (!memory) return 0; + + return new (memory) xml_attribute_struct(page); + } + + inline xml_node_struct* allocate_node(xml_allocator& alloc, xml_node_type type) + { + xml_memory_page* page; + void* memory = alloc.allocate_object(sizeof(xml_node_struct), page); + if (!memory) return 0; + + return new (memory) xml_node_struct(page, type); + } + + inline void destroy_attribute(xml_attribute_struct* a, xml_allocator& alloc) + { + if (a->header & impl::xml_memory_page_name_allocated_mask) + alloc.deallocate_string(a->name); + + if (a->header & impl::xml_memory_page_value_allocated_mask) + alloc.deallocate_string(a->value); + + alloc.deallocate_memory(a, sizeof(xml_attribute_struct), PUGI__GETPAGE(a)); + } + + inline void destroy_node(xml_node_struct* n, xml_allocator& alloc) + { + if (n->header & impl::xml_memory_page_name_allocated_mask) + alloc.deallocate_string(n->name); + + if (n->header & impl::xml_memory_page_value_allocated_mask) + alloc.deallocate_string(n->value); + + for (xml_attribute_struct* attr = n->first_attribute; attr; ) + { + xml_attribute_struct* next = attr->next_attribute; + + destroy_attribute(attr, alloc); + + attr = next; + } + + for (xml_node_struct* child = n->first_child; child; ) + { + xml_node_struct* next = child->next_sibling; + + destroy_node(child, alloc); + + child = next; + } + + alloc.deallocate_memory(n, sizeof(xml_node_struct), PUGI__GETPAGE(n)); + } + + inline void append_node(xml_node_struct* child, xml_node_struct* node) + { + child->parent = node; + + xml_node_struct* head = node->first_child; + + if (head) + { + xml_node_struct* tail = head->prev_sibling_c; + + tail->next_sibling = child; + child->prev_sibling_c = tail; + head->prev_sibling_c = child; + } + else + { + node->first_child = child; + child->prev_sibling_c = child; + } + } + + inline void prepend_node(xml_node_struct* child, xml_node_struct* node) + { + child->parent = node; + + xml_node_struct* head = 
node->first_child; + + if (head) + { + child->prev_sibling_c = head->prev_sibling_c; + head->prev_sibling_c = child; + } + else + child->prev_sibling_c = child; + + child->next_sibling = head; + node->first_child = child; + } + + inline void insert_node_after(xml_node_struct* child, xml_node_struct* node) + { + xml_node_struct* parent = node->parent; + + child->parent = parent; + + if (node->next_sibling) + node->next_sibling->prev_sibling_c = child; + else + parent->first_child->prev_sibling_c = child; + + child->next_sibling = node->next_sibling; + child->prev_sibling_c = node; + + node->next_sibling = child; + } + + inline void insert_node_before(xml_node_struct* child, xml_node_struct* node) + { + xml_node_struct* parent = node->parent; + + child->parent = parent; + + if (node->prev_sibling_c->next_sibling) + node->prev_sibling_c->next_sibling = child; + else + parent->first_child = child; + + child->prev_sibling_c = node->prev_sibling_c; + child->next_sibling = node; + + node->prev_sibling_c = child; + } + + inline void remove_node(xml_node_struct* node) + { + xml_node_struct* parent = node->parent; + + if (node->next_sibling) + node->next_sibling->prev_sibling_c = node->prev_sibling_c; + else + parent->first_child->prev_sibling_c = node->prev_sibling_c; + + if (node->prev_sibling_c->next_sibling) + node->prev_sibling_c->next_sibling = node->next_sibling; + else + parent->first_child = node->next_sibling; + + node->parent = 0; + node->prev_sibling_c = 0; + node->next_sibling = 0; + } + + inline void append_attribute(xml_attribute_struct* attr, xml_node_struct* node) + { + xml_attribute_struct* head = node->first_attribute; + + if (head) + { + xml_attribute_struct* tail = head->prev_attribute_c; + + tail->next_attribute = attr; + attr->prev_attribute_c = tail; + head->prev_attribute_c = attr; + } + else + { + node->first_attribute = attr; + attr->prev_attribute_c = attr; + } + } + + inline void prepend_attribute(xml_attribute_struct* attr, xml_node_struct* 
node) + { + xml_attribute_struct* head = node->first_attribute; + + if (head) + { + attr->prev_attribute_c = head->prev_attribute_c; + head->prev_attribute_c = attr; + } + else + attr->prev_attribute_c = attr; + + attr->next_attribute = head; + node->first_attribute = attr; + } + + inline void insert_attribute_after(xml_attribute_struct* attr, xml_attribute_struct* place, xml_node_struct* node) + { + if (place->next_attribute) + place->next_attribute->prev_attribute_c = attr; + else + node->first_attribute->prev_attribute_c = attr; + + attr->next_attribute = place->next_attribute; + attr->prev_attribute_c = place; + place->next_attribute = attr; + } + + inline void insert_attribute_before(xml_attribute_struct* attr, xml_attribute_struct* place, xml_node_struct* node) + { + if (place->prev_attribute_c->next_attribute) + place->prev_attribute_c->next_attribute = attr; + else + node->first_attribute = attr; + + attr->prev_attribute_c = place->prev_attribute_c; + attr->next_attribute = place; + place->prev_attribute_c = attr; + } + + inline void remove_attribute(xml_attribute_struct* attr, xml_node_struct* node) + { + if (attr->next_attribute) + attr->next_attribute->prev_attribute_c = attr->prev_attribute_c; + else + node->first_attribute->prev_attribute_c = attr->prev_attribute_c; + + if (attr->prev_attribute_c->next_attribute) + attr->prev_attribute_c->next_attribute = attr->next_attribute; + else + node->first_attribute = attr->next_attribute; + + attr->prev_attribute_c = 0; + attr->next_attribute = 0; + } + + PUGI__FN_NO_INLINE xml_node_struct* append_new_node(xml_node_struct* node, xml_allocator& alloc, xml_node_type type = node_element) + { + if (!alloc.reserve()) return 0; + + xml_node_struct* child = allocate_node(alloc, type); + if (!child) return 0; + + append_node(child, node); + + return child; + } + + PUGI__FN_NO_INLINE xml_attribute_struct* append_new_attribute(xml_node_struct* node, xml_allocator& alloc) + { + if (!alloc.reserve()) return 0; + + 
xml_attribute_struct* attr = allocate_attribute(alloc); + if (!attr) return 0; + + append_attribute(attr, node); + + return attr; + } +PUGI__NS_END + +// Helper classes for code generation +PUGI__NS_BEGIN + struct opt_false + { + enum { value = 0 }; + }; + + struct opt_true + { + enum { value = 1 }; + }; +PUGI__NS_END + +// Unicode utilities +PUGI__NS_BEGIN + inline uint16_t endian_swap(uint16_t value) + { + return static_cast(((value & 0xff) << 8) | (value >> 8)); + } + + inline uint32_t endian_swap(uint32_t value) + { + return ((value & 0xff) << 24) | ((value & 0xff00) << 8) | ((value & 0xff0000) >> 8) | (value >> 24); + } + + struct utf8_counter + { + typedef size_t value_type; + + static value_type low(value_type result, uint32_t ch) + { + // U+0000..U+007F + if (ch < 0x80) return result + 1; + // U+0080..U+07FF + else if (ch < 0x800) return result + 2; + // U+0800..U+FFFF + else return result + 3; + } + + static value_type high(value_type result, uint32_t) + { + // U+10000..U+10FFFF + return result + 4; + } + }; + + struct utf8_writer + { + typedef uint8_t* value_type; + + static value_type low(value_type result, uint32_t ch) + { + // U+0000..U+007F + if (ch < 0x80) + { + *result = static_cast(ch); + return result + 1; + } + // U+0080..U+07FF + else if (ch < 0x800) + { + result[0] = static_cast(0xC0 | (ch >> 6)); + result[1] = static_cast(0x80 | (ch & 0x3F)); + return result + 2; + } + // U+0800..U+FFFF + else + { + result[0] = static_cast(0xE0 | (ch >> 12)); + result[1] = static_cast(0x80 | ((ch >> 6) & 0x3F)); + result[2] = static_cast(0x80 | (ch & 0x3F)); + return result + 3; + } + } + + static value_type high(value_type result, uint32_t ch) + { + // U+10000..U+10FFFF + result[0] = static_cast(0xF0 | (ch >> 18)); + result[1] = static_cast(0x80 | ((ch >> 12) & 0x3F)); + result[2] = static_cast(0x80 | ((ch >> 6) & 0x3F)); + result[3] = static_cast(0x80 | (ch & 0x3F)); + return result + 4; + } + + static value_type any(value_type result, uint32_t ch) + { + 
return (ch < 0x10000) ? low(result, ch) : high(result, ch); + } + }; + + struct utf16_counter + { + typedef size_t value_type; + + static value_type low(value_type result, uint32_t) + { + return result + 1; + } + + static value_type high(value_type result, uint32_t) + { + return result + 2; + } + }; + + struct utf16_writer + { + typedef uint16_t* value_type; + + static value_type low(value_type result, uint32_t ch) + { + *result = static_cast(ch); + + return result + 1; + } + + static value_type high(value_type result, uint32_t ch) + { + uint32_t msh = static_cast(ch - 0x10000) >> 10; + uint32_t lsh = static_cast(ch - 0x10000) & 0x3ff; + + result[0] = static_cast(0xD800 + msh); + result[1] = static_cast(0xDC00 + lsh); + + return result + 2; + } + + static value_type any(value_type result, uint32_t ch) + { + return (ch < 0x10000) ? low(result, ch) : high(result, ch); + } + }; + + struct utf32_counter + { + typedef size_t value_type; + + static value_type low(value_type result, uint32_t) + { + return result + 1; + } + + static value_type high(value_type result, uint32_t) + { + return result + 1; + } + }; + + struct utf32_writer + { + typedef uint32_t* value_type; + + static value_type low(value_type result, uint32_t ch) + { + *result = ch; + + return result + 1; + } + + static value_type high(value_type result, uint32_t ch) + { + *result = ch; + + return result + 1; + } + + static value_type any(value_type result, uint32_t ch) + { + *result = ch; + + return result + 1; + } + }; + + struct latin1_writer + { + typedef uint8_t* value_type; + + static value_type low(value_type result, uint32_t ch) + { + *result = static_cast(ch > 255 ? '?' 
: ch); + + return result + 1; + } + + static value_type high(value_type result, uint32_t ch) + { + (void)ch; + + *result = '?'; + + return result + 1; + } + }; + + struct utf8_decoder + { + typedef uint8_t type; + + template static inline typename Traits::value_type process(const uint8_t* data, size_t size, typename Traits::value_type result, Traits) + { + const uint8_t utf8_byte_mask = 0x3f; + + while (size) + { + uint8_t lead = *data; + + // 0xxxxxxx -> U+0000..U+007F + if (lead < 0x80) + { + result = Traits::low(result, lead); + data += 1; + size -= 1; + + // process aligned single-byte (ascii) blocks + if ((reinterpret_cast(data) & 3) == 0) + { + // round-trip through void* to silence 'cast increases required alignment of target type' warnings + while (size >= 4 && (*static_cast(static_cast(data)) & 0x80808080) == 0) + { + result = Traits::low(result, data[0]); + result = Traits::low(result, data[1]); + result = Traits::low(result, data[2]); + result = Traits::low(result, data[3]); + data += 4; + size -= 4; + } + } + } + // 110xxxxx -> U+0080..U+07FF + else if (static_cast(lead - 0xC0) < 0x20 && size >= 2 && (data[1] & 0xc0) == 0x80) + { + result = Traits::low(result, ((lead & ~0xC0) << 6) | (data[1] & utf8_byte_mask)); + data += 2; + size -= 2; + } + // 1110xxxx -> U+0800-U+FFFF + else if (static_cast(lead - 0xE0) < 0x10 && size >= 3 && (data[1] & 0xc0) == 0x80 && (data[2] & 0xc0) == 0x80) + { + result = Traits::low(result, ((lead & ~0xE0) << 12) | ((data[1] & utf8_byte_mask) << 6) | (data[2] & utf8_byte_mask)); + data += 3; + size -= 3; + } + // 11110xxx -> U+10000..U+10FFFF + else if (static_cast(lead - 0xF0) < 0x08 && size >= 4 && (data[1] & 0xc0) == 0x80 && (data[2] & 0xc0) == 0x80 && (data[3] & 0xc0) == 0x80) + { + result = Traits::high(result, ((lead & ~0xF0) << 18) | ((data[1] & utf8_byte_mask) << 12) | ((data[2] & utf8_byte_mask) << 6) | (data[3] & utf8_byte_mask)); + data += 4; + size -= 4; + } + // 10xxxxxx or 11111xxx -> invalid + else + { + data += 
1; + size -= 1; + } + } + + return result; + } + }; + + template struct utf16_decoder + { + typedef uint16_t type; + + template static inline typename Traits::value_type process(const uint16_t* data, size_t size, typename Traits::value_type result, Traits) + { + while (size) + { + uint16_t lead = opt_swap::value ? endian_swap(*data) : *data; + + // U+0000..U+D7FF + if (lead < 0xD800) + { + result = Traits::low(result, lead); + data += 1; + size -= 1; + } + // U+E000..U+FFFF + else if (static_cast(lead - 0xE000) < 0x2000) + { + result = Traits::low(result, lead); + data += 1; + size -= 1; + } + // surrogate pair lead + else if (static_cast(lead - 0xD800) < 0x400 && size >= 2) + { + uint16_t next = opt_swap::value ? endian_swap(data[1]) : data[1]; + + if (static_cast(next - 0xDC00) < 0x400) + { + result = Traits::high(result, 0x10000 + ((lead & 0x3ff) << 10) + (next & 0x3ff)); + data += 2; + size -= 2; + } + else + { + data += 1; + size -= 1; + } + } + else + { + data += 1; + size -= 1; + } + } + + return result; + } + }; + + template struct utf32_decoder + { + typedef uint32_t type; + + template static inline typename Traits::value_type process(const uint32_t* data, size_t size, typename Traits::value_type result, Traits) + { + while (size) + { + uint32_t lead = opt_swap::value ? 
endian_swap(*data) : *data; + + // U+0000..U+FFFF + if (lead < 0x10000) + { + result = Traits::low(result, lead); + data += 1; + size -= 1; + } + // U+10000..U+10FFFF + else + { + result = Traits::high(result, lead); + data += 1; + size -= 1; + } + } + + return result; + } + }; + + struct latin1_decoder + { + typedef uint8_t type; + + template static inline typename Traits::value_type process(const uint8_t* data, size_t size, typename Traits::value_type result, Traits) + { + while (size) + { + result = Traits::low(result, *data); + data += 1; + size -= 1; + } + + return result; + } + }; + + template struct wchar_selector; + + template <> struct wchar_selector<2> + { + typedef uint16_t type; + typedef utf16_counter counter; + typedef utf16_writer writer; + typedef utf16_decoder decoder; + }; + + template <> struct wchar_selector<4> + { + typedef uint32_t type; + typedef utf32_counter counter; + typedef utf32_writer writer; + typedef utf32_decoder decoder; + }; + + typedef wchar_selector::counter wchar_counter; + typedef wchar_selector::writer wchar_writer; + + struct wchar_decoder + { + typedef wchar_t type; + + template static inline typename Traits::value_type process(const wchar_t* data, size_t size, typename Traits::value_type result, Traits traits) + { + typedef wchar_selector::decoder decoder; + + return decoder::process(reinterpret_cast(data), size, result, traits); + } + }; + +#ifdef PUGIXML_WCHAR_MODE + PUGI__FN void convert_wchar_endian_swap(wchar_t* result, const wchar_t* data, size_t length) + { + for (size_t i = 0; i < length; ++i) + result[i] = static_cast(endian_swap(static_cast::type>(data[i]))); + } +#endif +PUGI__NS_END + +PUGI__NS_BEGIN + enum chartype_t + { + ct_parse_pcdata = 1, // \0, &, \r, < + ct_parse_attr = 2, // \0, &, \r, ', " + ct_parse_attr_ws = 4, // \0, &, \r, ', ", \n, tab + ct_space = 8, // \r, \n, space, tab + ct_parse_cdata = 16, // \0, ], >, \r + ct_parse_comment = 32, // \0, -, >, \r + ct_symbol = 64, // Any symbol > 127, a-z, 
A-Z, 0-9, _, :, -, . + ct_start_symbol = 128 // Any symbol > 127, a-z, A-Z, _, : + }; + + static const unsigned char chartype_table[256] = + { + 55, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 0, 0, 63, 0, 0, // 0-15 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16-31 + 8, 0, 6, 0, 0, 0, 7, 6, 0, 0, 0, 0, 0, 96, 64, 0, // 32-47 + 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 192, 0, 1, 0, 48, 0, // 48-63 + 0, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, // 64-79 + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 0, 0, 16, 0, 192, // 80-95 + 0, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, // 96-111 + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 0, 0, 0, 0, 0, // 112-127 + + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, // 128+ + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192 + }; + + enum chartypex_t + { + ctx_special_pcdata = 1, // Any symbol >= 0 and < 32 (except \t, \r, \n), &, <, > + ctx_special_attr = 2, // Any symbol >= 0 and < 32, &, <, ", ' + ctx_start_symbol = 4, // Any symbol > 127, a-z, A-Z, _ + ctx_digit = 8, // 0-9 + ctx_symbol = 16 // Any symbol > 127, a-z, A-Z, 0-9, _, -, . 
+ }; + + static const unsigned char chartypex_table[256] = + { + 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3, // 0-15 + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 16-31 + 0, 0, 2, 0, 0, 0, 3, 2, 0, 0, 0, 0, 0, 16, 16, 0, // 32-47 + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 3, 0, 1, 0, // 48-63 + + 0, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, // 64-79 + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 20, // 80-95 + 0, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, // 96-111 + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, // 112-127 + + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, // 128+ + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20 + }; + +#ifdef PUGIXML_WCHAR_MODE + #define PUGI__IS_CHARTYPE_IMPL(c, ct, table) ((static_cast(c) < 128 ? table[static_cast(c)] : table[128]) & (ct)) +#else + #define PUGI__IS_CHARTYPE_IMPL(c, ct, table) (table[static_cast(c)] & (ct)) +#endif + + #define PUGI__IS_CHARTYPE(c, ct) PUGI__IS_CHARTYPE_IMPL(c, ct, chartype_table) + #define PUGI__IS_CHARTYPEX(c, ct) PUGI__IS_CHARTYPE_IMPL(c, ct, chartypex_table) + + PUGI__FN bool is_little_endian() + { + unsigned int ui = 1; + + return *reinterpret_cast(&ui) == 1; + } + + PUGI__FN xml_encoding get_wchar_encoding() + { + PUGI__STATIC_ASSERT(sizeof(wchar_t) == 2 || sizeof(wchar_t) == 4); + + if (sizeof(wchar_t) == 2) + return is_little_endian() ? encoding_utf16_le : encoding_utf16_be; + else + return is_little_endian() ? 
encoding_utf32_le : encoding_utf32_be; + } + + PUGI__FN bool parse_declaration_encoding(const uint8_t* data, size_t size, const uint8_t*& out_encoding, size_t& out_length) + { + #define PUGI__SCANCHAR(ch) { if (offset >= size || data[offset] != ch) return false; offset++; } + #define PUGI__SCANCHARTYPE(ct) { while (offset < size && PUGI__IS_CHARTYPE(data[offset], ct)) offset++; } + + // check if we have a non-empty XML declaration + if (size < 6 || !((data[0] == '<') & (data[1] == '?') & (data[2] == 'x') & (data[3] == 'm') & (data[4] == 'l') && PUGI__IS_CHARTYPE(data[5], ct_space))) + return false; + + // scan XML declaration until the encoding field + for (size_t i = 6; i + 1 < size; ++i) + { + // declaration can not contain ? in quoted values + if (data[i] == '?') + return false; + + if (data[i] == 'e' && data[i + 1] == 'n') + { + size_t offset = i; + + // encoding follows the version field which can't contain 'en' so this has to be the encoding if XML is well formed + PUGI__SCANCHAR('e'); PUGI__SCANCHAR('n'); PUGI__SCANCHAR('c'); PUGI__SCANCHAR('o'); + PUGI__SCANCHAR('d'); PUGI__SCANCHAR('i'); PUGI__SCANCHAR('n'); PUGI__SCANCHAR('g'); + + // S? = S? + PUGI__SCANCHARTYPE(ct_space); + PUGI__SCANCHAR('='); + PUGI__SCANCHARTYPE(ct_space); + + // the only two valid delimiters are ' and " + uint8_t delimiter = (offset < size && data[offset] == '"') ? 
'"' : '\''; + + PUGI__SCANCHAR(delimiter); + + size_t start = offset; + + out_encoding = data + offset; + + PUGI__SCANCHARTYPE(ct_symbol); + + out_length = offset - start; + + PUGI__SCANCHAR(delimiter); + + return true; + } + } + + return false; + + #undef PUGI__SCANCHAR + #undef PUGI__SCANCHARTYPE + } + + PUGI__FN xml_encoding guess_buffer_encoding(const uint8_t* data, size_t size) + { + // skip encoding autodetection if input buffer is too small + if (size < 4) return encoding_utf8; + + uint8_t d0 = data[0], d1 = data[1], d2 = data[2], d3 = data[3]; + + // look for BOM in first few bytes + if (d0 == 0 && d1 == 0 && d2 == 0xfe && d3 == 0xff) return encoding_utf32_be; + if (d0 == 0xff && d1 == 0xfe && d2 == 0 && d3 == 0) return encoding_utf32_le; + if (d0 == 0xfe && d1 == 0xff) return encoding_utf16_be; + if (d0 == 0xff && d1 == 0xfe) return encoding_utf16_le; + if (d0 == 0xef && d1 == 0xbb && d2 == 0xbf) return encoding_utf8; + + // look for <, (contents); + + return guess_buffer_encoding(data, size); + } + + PUGI__FN bool get_mutable_buffer(char_t*& out_buffer, size_t& out_length, const void* contents, size_t size, bool is_mutable) + { + size_t length = size / sizeof(char_t); + + if (is_mutable) + { + out_buffer = static_cast(const_cast(contents)); + out_length = length; + } + else + { + char_t* buffer = static_cast(xml_memory::allocate((length + 1) * sizeof(char_t))); + if (!buffer) return false; + + if (contents) + memcpy(buffer, contents, length * sizeof(char_t)); + else + assert(length == 0); + + buffer[length] = 0; + + out_buffer = buffer; + out_length = length + 1; + } + + return true; + } + +#ifdef PUGIXML_WCHAR_MODE + PUGI__FN bool need_endian_swap_utf(xml_encoding le, xml_encoding re) + { + return (le == encoding_utf16_be && re == encoding_utf16_le) || (le == encoding_utf16_le && re == encoding_utf16_be) || + (le == encoding_utf32_be && re == encoding_utf32_le) || (le == encoding_utf32_le && re == encoding_utf32_be); + } + + PUGI__FN bool 
convert_buffer_endian_swap(char_t*& out_buffer, size_t& out_length, const void* contents, size_t size, bool is_mutable) + { + const char_t* data = static_cast(contents); + size_t length = size / sizeof(char_t); + + if (is_mutable) + { + char_t* buffer = const_cast(data); + + convert_wchar_endian_swap(buffer, data, length); + + out_buffer = buffer; + out_length = length; + } + else + { + char_t* buffer = static_cast(xml_memory::allocate((length + 1) * sizeof(char_t))); + if (!buffer) return false; + + convert_wchar_endian_swap(buffer, data, length); + buffer[length] = 0; + + out_buffer = buffer; + out_length = length + 1; + } + + return true; + } + + template PUGI__FN bool convert_buffer_generic(char_t*& out_buffer, size_t& out_length, const void* contents, size_t size, D) + { + const typename D::type* data = static_cast(contents); + size_t data_length = size / sizeof(typename D::type); + + // first pass: get length in wchar_t units + size_t length = D::process(data, data_length, 0, wchar_counter()); + + // allocate buffer of suitable length + char_t* buffer = static_cast(xml_memory::allocate((length + 1) * sizeof(char_t))); + if (!buffer) return false; + + // second pass: convert utf16 input to wchar_t + wchar_writer::value_type obegin = reinterpret_cast(buffer); + wchar_writer::value_type oend = D::process(data, data_length, obegin, wchar_writer()); + + assert(oend == obegin + length); + *oend = 0; + + out_buffer = buffer; + out_length = length + 1; + + return true; + } + + PUGI__FN bool convert_buffer(char_t*& out_buffer, size_t& out_length, xml_encoding encoding, const void* contents, size_t size, bool is_mutable) + { + // get native encoding + xml_encoding wchar_encoding = get_wchar_encoding(); + + // fast path: no conversion required + if (encoding == wchar_encoding) + return get_mutable_buffer(out_buffer, out_length, contents, size, is_mutable); + + // only endian-swapping is required + if (need_endian_swap_utf(encoding, wchar_encoding)) + return 
convert_buffer_endian_swap(out_buffer, out_length, contents, size, is_mutable); + + // source encoding is utf8 + if (encoding == encoding_utf8) + return convert_buffer_generic(out_buffer, out_length, contents, size, utf8_decoder()); + + // source encoding is utf16 + if (encoding == encoding_utf16_be || encoding == encoding_utf16_le) + { + xml_encoding native_encoding = is_little_endian() ? encoding_utf16_le : encoding_utf16_be; + + return (native_encoding == encoding) ? + convert_buffer_generic(out_buffer, out_length, contents, size, utf16_decoder()) : + convert_buffer_generic(out_buffer, out_length, contents, size, utf16_decoder()); + } + + // source encoding is utf32 + if (encoding == encoding_utf32_be || encoding == encoding_utf32_le) + { + xml_encoding native_encoding = is_little_endian() ? encoding_utf32_le : encoding_utf32_be; + + return (native_encoding == encoding) ? + convert_buffer_generic(out_buffer, out_length, contents, size, utf32_decoder()) : + convert_buffer_generic(out_buffer, out_length, contents, size, utf32_decoder()); + } + + // source encoding is latin1 + if (encoding == encoding_latin1) + return convert_buffer_generic(out_buffer, out_length, contents, size, latin1_decoder()); + + assert(false && "Invalid encoding"); // unreachable + return false; + } +#else + template PUGI__FN bool convert_buffer_generic(char_t*& out_buffer, size_t& out_length, const void* contents, size_t size, D) + { + const typename D::type* data = static_cast(contents); + size_t data_length = size / sizeof(typename D::type); + + // first pass: get length in utf8 units + size_t length = D::process(data, data_length, 0, utf8_counter()); + + // allocate buffer of suitable length + char_t* buffer = static_cast(xml_memory::allocate((length + 1) * sizeof(char_t))); + if (!buffer) return false; + + // second pass: convert utf16 input to utf8 + uint8_t* obegin = reinterpret_cast(buffer); + uint8_t* oend = D::process(data, data_length, obegin, utf8_writer()); + + assert(oend == 
obegin + length); + *oend = 0; + + out_buffer = buffer; + out_length = length + 1; + + return true; + } + + PUGI__FN size_t get_latin1_7bit_prefix_length(const uint8_t* data, size_t size) + { + for (size_t i = 0; i < size; ++i) + if (data[i] > 127) + return i; + + return size; + } + + PUGI__FN bool convert_buffer_latin1(char_t*& out_buffer, size_t& out_length, const void* contents, size_t size, bool is_mutable) + { + const uint8_t* data = static_cast(contents); + size_t data_length = size; + + // get size of prefix that does not need utf8 conversion + size_t prefix_length = get_latin1_7bit_prefix_length(data, data_length); + assert(prefix_length <= data_length); + + const uint8_t* postfix = data + prefix_length; + size_t postfix_length = data_length - prefix_length; + + // if no conversion is needed, just return the original buffer + if (postfix_length == 0) return get_mutable_buffer(out_buffer, out_length, contents, size, is_mutable); + + // first pass: get length in utf8 units + size_t length = prefix_length + latin1_decoder::process(postfix, postfix_length, 0, utf8_counter()); + + // allocate buffer of suitable length + char_t* buffer = static_cast(xml_memory::allocate((length + 1) * sizeof(char_t))); + if (!buffer) return false; + + // second pass: convert latin1 input to utf8 + memcpy(buffer, data, prefix_length); + + uint8_t* obegin = reinterpret_cast(buffer); + uint8_t* oend = latin1_decoder::process(postfix, postfix_length, obegin + prefix_length, utf8_writer()); + + assert(oend == obegin + length); + *oend = 0; + + out_buffer = buffer; + out_length = length + 1; + + return true; + } + + PUGI__FN bool convert_buffer(char_t*& out_buffer, size_t& out_length, xml_encoding encoding, const void* contents, size_t size, bool is_mutable) + { + // fast path: no conversion required + if (encoding == encoding_utf8) + return get_mutable_buffer(out_buffer, out_length, contents, size, is_mutable); + + // source encoding is utf16 + if (encoding == encoding_utf16_be || 
encoding == encoding_utf16_le) + { + xml_encoding native_encoding = is_little_endian() ? encoding_utf16_le : encoding_utf16_be; + + return (native_encoding == encoding) ? + convert_buffer_generic(out_buffer, out_length, contents, size, utf16_decoder()) : + convert_buffer_generic(out_buffer, out_length, contents, size, utf16_decoder()); + } + + // source encoding is utf32 + if (encoding == encoding_utf32_be || encoding == encoding_utf32_le) + { + xml_encoding native_encoding = is_little_endian() ? encoding_utf32_le : encoding_utf32_be; + + return (native_encoding == encoding) ? + convert_buffer_generic(out_buffer, out_length, contents, size, utf32_decoder()) : + convert_buffer_generic(out_buffer, out_length, contents, size, utf32_decoder()); + } + + // source encoding is latin1 + if (encoding == encoding_latin1) + return convert_buffer_latin1(out_buffer, out_length, contents, size, is_mutable); + + assert(false && "Invalid encoding"); // unreachable + return false; + } +#endif + + PUGI__FN size_t as_utf8_begin(const wchar_t* str, size_t length) + { + // get length in utf8 characters + return wchar_decoder::process(str, length, 0, utf8_counter()); + } + + PUGI__FN void as_utf8_end(char* buffer, size_t size, const wchar_t* str, size_t length) + { + // convert to utf8 + uint8_t* begin = reinterpret_cast(buffer); + uint8_t* end = wchar_decoder::process(str, length, begin, utf8_writer()); + + assert(begin + size == end); + (void)!end; + (void)!size; + } + +#ifndef PUGIXML_NO_STL + PUGI__FN std::string as_utf8_impl(const wchar_t* str, size_t length) + { + // first pass: get length in utf8 characters + size_t size = as_utf8_begin(str, length); + + // allocate resulting string + std::string result; + result.resize(size); + + // second pass: convert to utf8 + if (size > 0) as_utf8_end(&result[0], size, str, length); + + return result; + } + + PUGI__FN std::basic_string as_wide_impl(const char* str, size_t size) + { + const uint8_t* data = reinterpret_cast(str); + + // first 
pass: get length in wchar_t units + size_t length = utf8_decoder::process(data, size, 0, wchar_counter()); + + // allocate resulting string + std::basic_string result; + result.resize(length); + + // second pass: convert to wchar_t + if (length > 0) + { + wchar_writer::value_type begin = reinterpret_cast(&result[0]); + wchar_writer::value_type end = utf8_decoder::process(data, size, begin, wchar_writer()); + + assert(begin + length == end); + (void)!end; + } + + return result; + } +#endif + + template + inline bool strcpy_insitu_allow(size_t length, const Header& header, uintptr_t header_mask, char_t* target) + { + // never reuse shared memory + if (header & xml_memory_page_contents_shared_mask) return false; + + size_t target_length = strlength(target); + + // always reuse document buffer memory if possible + if ((header & header_mask) == 0) return target_length >= length; + + // reuse heap memory if waste is not too great + const size_t reuse_threshold = 32; + + return target_length >= length && (target_length < reuse_threshold || target_length - length < target_length / 2); + } + + template + PUGI__FN bool strcpy_insitu(String& dest, Header& header, uintptr_t header_mask, const char_t* source, size_t source_length) + { + if (source_length == 0) + { + // empty string and null pointer are equivalent, so just deallocate old memory + xml_allocator* alloc = PUGI__GETPAGE_IMPL(header)->allocator; + + if (header & header_mask) alloc->deallocate_string(dest); + + // mark the string as not allocated + dest = 0; + header &= ~header_mask; + + return true; + } + else if (dest && strcpy_insitu_allow(source_length, header, header_mask, dest)) + { + // we can reuse old buffer, so just copy the new data (including zero terminator) + memcpy(dest, source, source_length * sizeof(char_t)); + dest[source_length] = 0; + + return true; + } + else + { + xml_allocator* alloc = PUGI__GETPAGE_IMPL(header)->allocator; + + if (!alloc->reserve()) return false; + + // allocate new buffer + 
char_t* buf = alloc->allocate_string(source_length + 1); + if (!buf) return false; + + // copy the string (including zero terminator) + memcpy(buf, source, source_length * sizeof(char_t)); + buf[source_length] = 0; + + // deallocate old buffer (*after* the above to protect against overlapping memory and/or allocation failures) + if (header & header_mask) alloc->deallocate_string(dest); + + // the string is now allocated, so set the flag + dest = buf; + header |= header_mask; + + return true; + } + } + + struct gap + { + char_t* end; + size_t size; + + gap(): end(0), size(0) + { + } + + // Push new gap, move s count bytes further (skipping the gap). + // Collapse previous gap. + void push(char_t*& s, size_t count) + { + if (end) // there was a gap already; collapse it + { + // Move [old_gap_end, new_gap_start) to [old_gap_start, ...) + assert(s >= end); + memmove(end - size, end, reinterpret_cast(s) - reinterpret_cast(end)); + } + + s += count; // end of current gap + + // "merge" two gaps + end = s; + size += count; + } + + // Collapse all gaps, return past-the-end pointer + char_t* flush(char_t* s) + { + if (end) + { + // Move [old_gap_end, current_pos) to [old_gap_start, ...) + assert(s >= end); + memmove(end - size, end, reinterpret_cast(s) - reinterpret_cast(end)); + + return s - size; + } + else return s; + } + }; + + PUGI__FN char_t* strconv_escape(char_t* s, gap& g) + { + char_t* stre = s + 1; + + switch (*stre) + { + case '#': // &#... + { + unsigned int ucsc = 0; + + if (stre[1] == 'x') // &#x... (hex code) + { + stre += 2; + + char_t ch = *stre; + + if (ch == ';') return stre; + + for (;;) + { + if (static_cast(ch - '0') <= 9) + ucsc = 16 * ucsc + (ch - '0'); + else if (static_cast((ch | ' ') - 'a') <= 5) + ucsc = 16 * ucsc + ((ch | ' ') - 'a' + 10); + else if (ch == ';') + break; + else // cancel + return stre; + + ch = *++stre; + } + + ++stre; + } + else // &#... 
(dec code) + { + char_t ch = *++stre; + + if (ch == ';') return stre; + + for (;;) + { + if (static_cast(ch - '0') <= 9) + ucsc = 10 * ucsc + (ch - '0'); + else if (ch == ';') + break; + else // cancel + return stre; + + ch = *++stre; + } + + ++stre; + } + + #ifdef PUGIXML_WCHAR_MODE + s = reinterpret_cast(wchar_writer::any(reinterpret_cast(s), ucsc)); + #else + s = reinterpret_cast(utf8_writer::any(reinterpret_cast(s), ucsc)); + #endif + + g.push(s, stre - s); + return stre; + } + + case 'a': // &a + { + ++stre; + + if (*stre == 'm') // &am + { + if (*++stre == 'p' && *++stre == ';') // & + { + *s++ = '&'; + ++stre; + + g.push(s, stre - s); + return stre; + } + } + else if (*stre == 'p') // &ap + { + if (*++stre == 'o' && *++stre == 's' && *++stre == ';') // ' + { + *s++ = '\''; + ++stre; + + g.push(s, stre - s); + return stre; + } + } + break; + } + + case 'g': // &g + { + if (*++stre == 't' && *++stre == ';') // > + { + *s++ = '>'; + ++stre; + + g.push(s, stre - s); + return stre; + } + break; + } + + case 'l': // &l + { + if (*++stre == 't' && *++stre == ';') // < + { + *s++ = '<'; + ++stre; + + g.push(s, stre - s); + return stre; + } + break; + } + + case 'q': // &q + { + if (*++stre == 'u' && *++stre == 'o' && *++stre == 't' && *++stre == ';') // " + { + *s++ = '"'; + ++stre; + + g.push(s, stre - s); + return stre; + } + break; + } + + default: + break; + } + + return stre; + } + + // Parser utilities + #define PUGI__ENDSWITH(c, e) ((c) == (e) || ((c) == 0 && endch == (e))) + #define PUGI__SKIPWS() { while (PUGI__IS_CHARTYPE(*s, ct_space)) ++s; } + #define PUGI__OPTSET(OPT) ( optmsk & (OPT) ) + #define PUGI__PUSHNODE(TYPE) { cursor = append_new_node(cursor, *alloc, TYPE); if (!cursor) PUGI__THROW_ERROR(status_out_of_memory, s); } + #define PUGI__POPNODE() { cursor = cursor->parent; } + #define PUGI__SCANFOR(X) { while (*s != 0 && !(X)) ++s; } + #define PUGI__SCANWHILE(X) { while (X) ++s; } + #define PUGI__SCANWHILE_UNROLL(X) { for (;;) { char_t ss = s[0]; if 
(PUGI__UNLIKELY(!(X))) { break; } ss = s[1]; if (PUGI__UNLIKELY(!(X))) { s += 1; break; } ss = s[2]; if (PUGI__UNLIKELY(!(X))) { s += 2; break; } ss = s[3]; if (PUGI__UNLIKELY(!(X))) { s += 3; break; } s += 4; } } + #define PUGI__ENDSEG() { ch = *s; *s = 0; ++s; } + #define PUGI__THROW_ERROR(err, m) return error_offset = m, error_status = err, static_cast(0) + #define PUGI__CHECK_ERROR(err, m) { if (*s == 0) PUGI__THROW_ERROR(err, m); } + + PUGI__FN char_t* strconv_comment(char_t* s, char_t endch) + { + gap g; + + while (true) + { + PUGI__SCANWHILE_UNROLL(!PUGI__IS_CHARTYPE(ss, ct_parse_comment)); + + if (*s == '\r') // Either a single 0x0d or 0x0d 0x0a pair + { + *s++ = '\n'; // replace first one with 0x0a + + if (*s == '\n') g.push(s, 1); + } + else if (s[0] == '-' && s[1] == '-' && PUGI__ENDSWITH(s[2], '>')) // comment ends here + { + *g.flush(s) = 0; + + return s + (s[2] == '>' ? 3 : 2); + } + else if (*s == 0) + { + return 0; + } + else ++s; + } + } + + PUGI__FN char_t* strconv_cdata(char_t* s, char_t endch) + { + gap g; + + while (true) + { + PUGI__SCANWHILE_UNROLL(!PUGI__IS_CHARTYPE(ss, ct_parse_cdata)); + + if (*s == '\r') // Either a single 0x0d or 0x0d 0x0a pair + { + *s++ = '\n'; // replace first one with 0x0a + + if (*s == '\n') g.push(s, 1); + } + else if (s[0] == ']' && s[1] == ']' && PUGI__ENDSWITH(s[2], '>')) // CDATA ends here + { + *g.flush(s) = 0; + + return s + 1; + } + else if (*s == 0) + { + return 0; + } + else ++s; + } + } + + typedef char_t* (*strconv_pcdata_t)(char_t*); + + template struct strconv_pcdata_impl + { + static char_t* parse(char_t* s) + { + gap g; + + char_t* begin = s; + + while (true) + { + PUGI__SCANWHILE_UNROLL(!PUGI__IS_CHARTYPE(ss, ct_parse_pcdata)); + + if (*s == '<') // PCDATA ends here + { + char_t* end = g.flush(s); + + if (opt_trim::value) + while (end > begin && PUGI__IS_CHARTYPE(end[-1], ct_space)) + --end; + + *end = 0; + + return s + 1; + } + else if (opt_eol::value && *s == '\r') // Either a single 0x0d or 0x0d 
0x0a pair + { + *s++ = '\n'; // replace first one with 0x0a + + if (*s == '\n') g.push(s, 1); + } + else if (opt_escape::value && *s == '&') + { + s = strconv_escape(s, g); + } + else if (*s == 0) + { + char_t* end = g.flush(s); + + if (opt_trim::value) + while (end > begin && PUGI__IS_CHARTYPE(end[-1], ct_space)) + --end; + + *end = 0; + + return s; + } + else ++s; + } + } + }; + + PUGI__FN strconv_pcdata_t get_strconv_pcdata(unsigned int optmask) + { + PUGI__STATIC_ASSERT(parse_escapes == 0x10 && parse_eol == 0x20 && parse_trim_pcdata == 0x0800); + + switch (((optmask >> 4) & 3) | ((optmask >> 9) & 4)) // get bitmask for flags (trim eol escapes); this simultaneously checks 3 options from assertion above + { + case 0: return strconv_pcdata_impl::parse; + case 1: return strconv_pcdata_impl::parse; + case 2: return strconv_pcdata_impl::parse; + case 3: return strconv_pcdata_impl::parse; + case 4: return strconv_pcdata_impl::parse; + case 5: return strconv_pcdata_impl::parse; + case 6: return strconv_pcdata_impl::parse; + case 7: return strconv_pcdata_impl::parse; + default: assert(false); return 0; // unreachable + } + } + + typedef char_t* (*strconv_attribute_t)(char_t*, char_t); + + template struct strconv_attribute_impl + { + static char_t* parse_wnorm(char_t* s, char_t end_quote) + { + gap g; + + // trim leading whitespaces + if (PUGI__IS_CHARTYPE(*s, ct_space)) + { + char_t* str = s; + + do ++str; + while (PUGI__IS_CHARTYPE(*str, ct_space)); + + g.push(s, str - s); + } + + while (true) + { + PUGI__SCANWHILE_UNROLL(!PUGI__IS_CHARTYPE(ss, ct_parse_attr_ws | ct_space)); + + if (*s == end_quote) + { + char_t* str = g.flush(s); + + do *str-- = 0; + while (PUGI__IS_CHARTYPE(*str, ct_space)); + + return s + 1; + } + else if (PUGI__IS_CHARTYPE(*s, ct_space)) + { + *s++ = ' '; + + if (PUGI__IS_CHARTYPE(*s, ct_space)) + { + char_t* str = s + 1; + while (PUGI__IS_CHARTYPE(*str, ct_space)) ++str; + + g.push(s, str - s); + } + } + else if (opt_escape::value && *s == '&') + 
{ + s = strconv_escape(s, g); + } + else if (!*s) + { + return 0; + } + else ++s; + } + } + + static char_t* parse_wconv(char_t* s, char_t end_quote) + { + gap g; + + while (true) + { + PUGI__SCANWHILE_UNROLL(!PUGI__IS_CHARTYPE(ss, ct_parse_attr_ws)); + + if (*s == end_quote) + { + *g.flush(s) = 0; + + return s + 1; + } + else if (PUGI__IS_CHARTYPE(*s, ct_space)) + { + if (*s == '\r') + { + *s++ = ' '; + + if (*s == '\n') g.push(s, 1); + } + else *s++ = ' '; + } + else if (opt_escape::value && *s == '&') + { + s = strconv_escape(s, g); + } + else if (!*s) + { + return 0; + } + else ++s; + } + } + + static char_t* parse_eol(char_t* s, char_t end_quote) + { + gap g; + + while (true) + { + PUGI__SCANWHILE_UNROLL(!PUGI__IS_CHARTYPE(ss, ct_parse_attr)); + + if (*s == end_quote) + { + *g.flush(s) = 0; + + return s + 1; + } + else if (*s == '\r') + { + *s++ = '\n'; + + if (*s == '\n') g.push(s, 1); + } + else if (opt_escape::value && *s == '&') + { + s = strconv_escape(s, g); + } + else if (!*s) + { + return 0; + } + else ++s; + } + } + + static char_t* parse_simple(char_t* s, char_t end_quote) + { + gap g; + + while (true) + { + PUGI__SCANWHILE_UNROLL(!PUGI__IS_CHARTYPE(ss, ct_parse_attr)); + + if (*s == end_quote) + { + *g.flush(s) = 0; + + return s + 1; + } + else if (opt_escape::value && *s == '&') + { + s = strconv_escape(s, g); + } + else if (!*s) + { + return 0; + } + else ++s; + } + } + }; + + PUGI__FN strconv_attribute_t get_strconv_attribute(unsigned int optmask) + { + PUGI__STATIC_ASSERT(parse_escapes == 0x10 && parse_eol == 0x20 && parse_wconv_attribute == 0x40 && parse_wnorm_attribute == 0x80); + + switch ((optmask >> 4) & 15) // get bitmask for flags (wnorm wconv eol escapes); this simultaneously checks 4 options from assertion above + { + case 0: return strconv_attribute_impl::parse_simple; + case 1: return strconv_attribute_impl::parse_simple; + case 2: return strconv_attribute_impl::parse_eol; + case 3: return strconv_attribute_impl::parse_eol; + case 4: 
return strconv_attribute_impl::parse_wconv; + case 5: return strconv_attribute_impl::parse_wconv; + case 6: return strconv_attribute_impl::parse_wconv; + case 7: return strconv_attribute_impl::parse_wconv; + case 8: return strconv_attribute_impl::parse_wnorm; + case 9: return strconv_attribute_impl::parse_wnorm; + case 10: return strconv_attribute_impl::parse_wnorm; + case 11: return strconv_attribute_impl::parse_wnorm; + case 12: return strconv_attribute_impl::parse_wnorm; + case 13: return strconv_attribute_impl::parse_wnorm; + case 14: return strconv_attribute_impl::parse_wnorm; + case 15: return strconv_attribute_impl::parse_wnorm; + default: assert(false); return 0; // unreachable + } + } + + inline xml_parse_result make_parse_result(xml_parse_status status, ptrdiff_t offset = 0) + { + xml_parse_result result; + result.status = status; + result.offset = offset; + + return result; + } + + struct xml_parser + { + xml_allocator* alloc; + char_t* error_offset; + xml_parse_status error_status; + + xml_parser(xml_allocator* alloc_): alloc(alloc_), error_offset(0), error_status(status_ok) + { + } + + // DOCTYPE consists of nested sections of the following possible types: + // , , "...", '...' + // + // + // First group can not contain nested groups + // Second group can contain nested groups of the same type + // Third group can contain all other groups + char_t* parse_doctype_primitive(char_t* s) + { + if (*s == '"' || *s == '\'') + { + // quoted string + char_t ch = *s++; + PUGI__SCANFOR(*s == ch); + if (!*s) PUGI__THROW_ERROR(status_bad_doctype, s); + + s++; + } + else if (s[0] == '<' && s[1] == '?') + { + // + s += 2; + PUGI__SCANFOR(s[0] == '?' && s[1] == '>'); // no need for ENDSWITH because ?> can't terminate proper doctype + if (!*s) PUGI__THROW_ERROR(status_bad_doctype, s); + + s += 2; + } + else if (s[0] == '<' && s[1] == '!' 
&& s[2] == '-' && s[3] == '-') + { + s += 4; + PUGI__SCANFOR(s[0] == '-' && s[1] == '-' && s[2] == '>'); // no need for ENDSWITH because --> can't terminate proper doctype + if (!*s) PUGI__THROW_ERROR(status_bad_doctype, s); + + s += 3; + } + else PUGI__THROW_ERROR(status_bad_doctype, s); + + return s; + } + + char_t* parse_doctype_ignore(char_t* s) + { + size_t depth = 0; + + assert(s[0] == '<' && s[1] == '!' && s[2] == '['); + s += 3; + + while (*s) + { + if (s[0] == '<' && s[1] == '!' && s[2] == '[') + { + // nested ignore section + s += 3; + depth++; + } + else if (s[0] == ']' && s[1] == ']' && s[2] == '>') + { + // ignore section end + s += 3; + + if (depth == 0) + return s; + + depth--; + } + else s++; + } + + PUGI__THROW_ERROR(status_bad_doctype, s); + } + + char_t* parse_doctype_group(char_t* s, char_t endch) + { + size_t depth = 0; + + assert((s[0] == '<' || s[0] == 0) && s[1] == '!'); + s += 2; + + while (*s) + { + if (s[0] == '<' && s[1] == '!' && s[2] != '-') + { + if (s[2] == '[') + { + // ignore + s = parse_doctype_ignore(s); + if (!s) return s; + } + else + { + // some control group + s += 2; + depth++; + } + } + else if (s[0] == '<' || s[0] == '"' || s[0] == '\'') + { + // unknown tag (forbidden), or some primitive group + s = parse_doctype_primitive(s); + if (!s) return s; + } + else if (*s == '>') + { + if (depth == 0) + return s; + + depth--; + s++; + } + else s++; + } + + if (depth != 0 || endch != '>') PUGI__THROW_ERROR(status_bad_doctype, s); + + return s; + } + + char_t* parse_exclamation(char_t* s, xml_node_struct* cursor, unsigned int optmsk, char_t endch) + { + // parse node contents, starting with exclamation mark + ++s; + + if (*s == '-') // 'value = s; // Save the offset. + } + + if (PUGI__OPTSET(parse_eol) && PUGI__OPTSET(parse_comments)) + { + s = strconv_comment(s, endch); + + if (!s) PUGI__THROW_ERROR(status_bad_comment, cursor->value); + } + else + { + // Scan for terminating '-->'. 
+ PUGI__SCANFOR(s[0] == '-' && s[1] == '-' && PUGI__ENDSWITH(s[2], '>')); + PUGI__CHECK_ERROR(status_bad_comment, s); + + if (PUGI__OPTSET(parse_comments)) + *s = 0; // Zero-terminate this segment at the first terminating '-'. + + s += (s[2] == '>' ? 3 : 2); // Step over the '\0->'. + } + } + else PUGI__THROW_ERROR(status_bad_comment, s); + } + else if (*s == '[') + { + // 'value = s; // Save the offset. + + if (PUGI__OPTSET(parse_eol)) + { + s = strconv_cdata(s, endch); + + if (!s) PUGI__THROW_ERROR(status_bad_cdata, cursor->value); + } + else + { + // Scan for terminating ']]>'. + PUGI__SCANFOR(s[0] == ']' && s[1] == ']' && PUGI__ENDSWITH(s[2], '>')); + PUGI__CHECK_ERROR(status_bad_cdata, s); + + *s++ = 0; // Zero-terminate this segment. + } + } + else // Flagged for discard, but we still have to scan for the terminator. + { + // Scan for terminating ']]>'. + PUGI__SCANFOR(s[0] == ']' && s[1] == ']' && PUGI__ENDSWITH(s[2], '>')); + PUGI__CHECK_ERROR(status_bad_cdata, s); + + ++s; + } + + s += (s[1] == '>' ? 2 : 1); // Step over the last ']>'. 
+ } + else PUGI__THROW_ERROR(status_bad_cdata, s); + } + else if (s[0] == 'D' && s[1] == 'O' && s[2] == 'C' && s[3] == 'T' && s[4] == 'Y' && s[5] == 'P' && PUGI__ENDSWITH(s[6], 'E')) + { + s -= 2; + + if (cursor->parent) PUGI__THROW_ERROR(status_bad_doctype, s); + + char_t* mark = s + 9; + + s = parse_doctype_group(s, endch); + if (!s) return s; + + assert((*s == 0 && endch == '>') || *s == '>'); + if (*s) *s++ = 0; + + if (PUGI__OPTSET(parse_doctype)) + { + while (PUGI__IS_CHARTYPE(*mark, ct_space)) ++mark; + + PUGI__PUSHNODE(node_doctype); + + cursor->value = mark; + } + } + else if (*s == 0 && endch == '-') PUGI__THROW_ERROR(status_bad_comment, s); + else if (*s == 0 && endch == '[') PUGI__THROW_ERROR(status_bad_cdata, s); + else PUGI__THROW_ERROR(status_unrecognized_tag, s); + + return s; + } + + char_t* parse_question(char_t* s, xml_node_struct*& ref_cursor, unsigned int optmsk, char_t endch) + { + // load into registers + xml_node_struct* cursor = ref_cursor; + char_t ch = 0; + + // parse node contents, starting with question mark + ++s; + + // read PI target + char_t* target = s; + + if (!PUGI__IS_CHARTYPE(*s, ct_start_symbol)) PUGI__THROW_ERROR(status_bad_pi, s); + + PUGI__SCANWHILE(PUGI__IS_CHARTYPE(*s, ct_symbol)); + PUGI__CHECK_ERROR(status_bad_pi, s); + + // determine node type; stricmp / strcasecmp is not portable + bool declaration = (target[0] | ' ') == 'x' && (target[1] | ' ') == 'm' && (target[2] | ' ') == 'l' && target + 3 == s; + + if (declaration ? 
PUGI__OPTSET(parse_declaration) : PUGI__OPTSET(parse_pi)) + { + if (declaration) + { + // disallow non top-level declarations + if (cursor->parent) PUGI__THROW_ERROR(status_bad_pi, s); + + PUGI__PUSHNODE(node_declaration); + } + else + { + PUGI__PUSHNODE(node_pi); + } + + cursor->name = target; + + PUGI__ENDSEG(); + + // parse value/attributes + if (ch == '?') + { + // empty node + if (!PUGI__ENDSWITH(*s, '>')) PUGI__THROW_ERROR(status_bad_pi, s); + s += (*s == '>'); + + PUGI__POPNODE(); + } + else if (PUGI__IS_CHARTYPE(ch, ct_space)) + { + PUGI__SKIPWS(); + + // scan for tag end + char_t* value = s; + + PUGI__SCANFOR(s[0] == '?' && PUGI__ENDSWITH(s[1], '>')); + PUGI__CHECK_ERROR(status_bad_pi, s); + + if (declaration) + { + // replace ending ? with / so that 'element' terminates properly + *s = '/'; + + // we exit from this function with cursor at node_declaration, which is a signal to parse() to go to LOC_ATTRIBUTES + s = value; + } + else + { + // store value and step over > + cursor->value = value; + + PUGI__POPNODE(); + + PUGI__ENDSEG(); + + s += (*s == '>'); + } + } + else PUGI__THROW_ERROR(status_bad_pi, s); + } + else + { + // scan for tag end + PUGI__SCANFOR(s[0] == '?' && PUGI__ENDSWITH(s[1], '>')); + PUGI__CHECK_ERROR(status_bad_pi, s); + + s += (s[1] == '>' ? 2 : 1); + } + + // store from registers + ref_cursor = cursor; + + return s; + } + + char_t* parse_tree(char_t* s, xml_node_struct* root, unsigned int optmsk, char_t endch) + { + strconv_attribute_t strconv_attribute = get_strconv_attribute(optmsk); + strconv_pcdata_t strconv_pcdata = get_strconv_pcdata(optmsk); + + char_t ch = 0; + xml_node_struct* cursor = root; + char_t* mark = s; + + while (*s != 0) + { + if (*s == '<') + { + ++s; + + LOC_TAG: + if (PUGI__IS_CHARTYPE(*s, ct_start_symbol)) // '<#...' + { + PUGI__PUSHNODE(node_element); // Append a new node to the tree. + + cursor->name = s; + + PUGI__SCANWHILE_UNROLL(PUGI__IS_CHARTYPE(ss, ct_symbol)); // Scan for a terminator. 
+ PUGI__ENDSEG(); // Save char in 'ch', terminate & step over. + + if (ch == '>') + { + // end of tag + } + else if (PUGI__IS_CHARTYPE(ch, ct_space)) + { + LOC_ATTRIBUTES: + while (true) + { + PUGI__SKIPWS(); // Eat any whitespace. + + if (PUGI__IS_CHARTYPE(*s, ct_start_symbol)) // <... #... + { + xml_attribute_struct* a = append_new_attribute(cursor, *alloc); // Make space for this attribute. + if (!a) PUGI__THROW_ERROR(status_out_of_memory, s); + + a->name = s; // Save the offset. + + PUGI__SCANWHILE_UNROLL(PUGI__IS_CHARTYPE(ss, ct_symbol)); // Scan for a terminator. + PUGI__ENDSEG(); // Save char in 'ch', terminate & step over. + + if (PUGI__IS_CHARTYPE(ch, ct_space)) + { + PUGI__SKIPWS(); // Eat any whitespace. + + ch = *s; + ++s; + } + + if (ch == '=') // '<... #=...' + { + PUGI__SKIPWS(); // Eat any whitespace. + + if (*s == '"' || *s == '\'') // '<... #="...' + { + ch = *s; // Save quote char to avoid breaking on "''" -or- '""'. + ++s; // Step over the quote. + a->value = s; // Save the offset. + + s = strconv_attribute(s, ch); + + if (!s) PUGI__THROW_ERROR(status_bad_attribute, a->value); + + // After this line the loop continues from the start; + // Whitespaces, / and > are ok, symbols and EOF are wrong, + // everything else will be detected + if (PUGI__IS_CHARTYPE(*s, ct_start_symbol)) PUGI__THROW_ERROR(status_bad_attribute, s); + } + else PUGI__THROW_ERROR(status_bad_attribute, s); + } + else PUGI__THROW_ERROR(status_bad_attribute, s); + } + else if (*s == '/') + { + ++s; + + if (*s == '>') + { + PUGI__POPNODE(); + s++; + break; + } + else if (*s == 0 && endch == '>') + { + PUGI__POPNODE(); + break; + } + else PUGI__THROW_ERROR(status_bad_start_element, s); + } + else if (*s == '>') + { + ++s; + + break; + } + else if (*s == 0 && endch == '>') + { + break; + } + else PUGI__THROW_ERROR(status_bad_start_element, s); + } + + // !!! 
+ } + else if (ch == '/') // '<#.../' + { + if (!PUGI__ENDSWITH(*s, '>')) PUGI__THROW_ERROR(status_bad_start_element, s); + + PUGI__POPNODE(); // Pop. + + s += (*s == '>'); + } + else if (ch == 0) + { + // we stepped over null terminator, backtrack & handle closing tag + --s; + + if (endch != '>') PUGI__THROW_ERROR(status_bad_start_element, s); + } + else PUGI__THROW_ERROR(status_bad_start_element, s); + } + else if (*s == '/') + { + ++s; + + mark = s; + + char_t* name = cursor->name; + if (!name) PUGI__THROW_ERROR(status_end_element_mismatch, mark); + + while (PUGI__IS_CHARTYPE(*s, ct_symbol)) + { + if (*s++ != *name++) PUGI__THROW_ERROR(status_end_element_mismatch, mark); + } + + if (*name) + { + if (*s == 0 && name[0] == endch && name[1] == 0) PUGI__THROW_ERROR(status_bad_end_element, s); + else PUGI__THROW_ERROR(status_end_element_mismatch, mark); + } + + PUGI__POPNODE(); // Pop. + + PUGI__SKIPWS(); + + if (*s == 0) + { + if (endch != '>') PUGI__THROW_ERROR(status_bad_end_element, s); + } + else + { + if (*s != '>') PUGI__THROW_ERROR(status_bad_end_element, s); + ++s; + } + } + else if (*s == '?') // 'first_child) continue; + } + } + + if (!PUGI__OPTSET(parse_trim_pcdata)) + s = mark; + + if (cursor->parent || PUGI__OPTSET(parse_fragment)) + { + if (PUGI__OPTSET(parse_embed_pcdata) && cursor->parent && !cursor->first_child && !cursor->value) + { + cursor->value = s; // Save the offset. + } + else + { + PUGI__PUSHNODE(node_pcdata); // Append a new node on the tree. + + cursor->value = s; // Save the offset. + + PUGI__POPNODE(); // Pop since this is a standalone. 
+ } + + s = strconv_pcdata(s); + + if (!*s) break; + } + else + { + PUGI__SCANFOR(*s == '<'); // '...<' + if (!*s) break; + + ++s; + } + + // We're after '<' + goto LOC_TAG; + } + } + + // check that last tag is closed + if (cursor != root) PUGI__THROW_ERROR(status_end_element_mismatch, s); + + return s; + } + + #ifdef PUGIXML_WCHAR_MODE + static char_t* parse_skip_bom(char_t* s) + { + unsigned int bom = 0xfeff; + return (s[0] == static_cast(bom)) ? s + 1 : s; + } + #else + static char_t* parse_skip_bom(char_t* s) + { + return (s[0] == '\xef' && s[1] == '\xbb' && s[2] == '\xbf') ? s + 3 : s; + } + #endif + + static bool has_element_node_siblings(xml_node_struct* node) + { + while (node) + { + if (PUGI__NODETYPE(node) == node_element) return true; + + node = node->next_sibling; + } + + return false; + } + + static xml_parse_result parse(char_t* buffer, size_t length, xml_document_struct* xmldoc, xml_node_struct* root, unsigned int optmsk) + { + // early-out for empty documents + if (length == 0) + return make_parse_result(PUGI__OPTSET(parse_fragment) ? status_ok : status_no_document_element); + + // get last child of the root before parsing + xml_node_struct* last_root_child = root->first_child ? root->first_child->prev_sibling_c + 0 : 0; + + // create parser on stack + xml_parser parser(static_cast(xmldoc)); + + // save last character and make buffer zero-terminated (speeds up parsing) + char_t endch = buffer[length - 1]; + buffer[length - 1] = 0; + + // skip BOM to make sure it does not end up as part of parse output + char_t* buffer_data = parse_skip_bom(buffer); + + // perform actual parsing + parser.parse_tree(buffer_data, root, optmsk, endch); + + xml_parse_result result = make_parse_result(parser.error_status, parser.error_offset ? 
parser.error_offset - buffer : 0); + assert(result.offset >= 0 && static_cast(result.offset) <= length); + + if (result) + { + // since we removed last character, we have to handle the only possible false positive (stray <) + if (endch == '<') + return make_parse_result(status_unrecognized_tag, length - 1); + + // check if there are any element nodes parsed + xml_node_struct* first_root_child_parsed = last_root_child ? last_root_child->next_sibling + 0 : root->first_child+ 0; + + if (!PUGI__OPTSET(parse_fragment) && !has_element_node_siblings(first_root_child_parsed)) + return make_parse_result(status_no_document_element, length - 1); + } + else + { + // roll back offset if it occurs on a null terminator in the source buffer + if (result.offset > 0 && static_cast(result.offset) == length - 1 && endch == 0) + result.offset--; + } + + return result; + } + }; + + // Output facilities + PUGI__FN xml_encoding get_write_native_encoding() + { + #ifdef PUGIXML_WCHAR_MODE + return get_wchar_encoding(); + #else + return encoding_utf8; + #endif + } + + PUGI__FN xml_encoding get_write_encoding(xml_encoding encoding) + { + // replace wchar encoding with utf implementation + if (encoding == encoding_wchar) return get_wchar_encoding(); + + // replace utf16 encoding with utf16 with specific endianness + if (encoding == encoding_utf16) return is_little_endian() ? encoding_utf16_le : encoding_utf16_be; + + // replace utf32 encoding with utf32 with specific endianness + if (encoding == encoding_utf32) return is_little_endian() ? 
encoding_utf32_le : encoding_utf32_be; + + // only do autodetection if no explicit encoding is requested + if (encoding != encoding_auto) return encoding; + + // assume utf8 encoding + return encoding_utf8; + } + + template PUGI__FN size_t convert_buffer_output_generic(typename T::value_type dest, const char_t* data, size_t length, D, T) + { + PUGI__STATIC_ASSERT(sizeof(char_t) == sizeof(typename D::type)); + + typename T::value_type end = D::process(reinterpret_cast(data), length, dest, T()); + + return static_cast(end - dest) * sizeof(*dest); + } + + template PUGI__FN size_t convert_buffer_output_generic(typename T::value_type dest, const char_t* data, size_t length, D, T, bool opt_swap) + { + PUGI__STATIC_ASSERT(sizeof(char_t) == sizeof(typename D::type)); + + typename T::value_type end = D::process(reinterpret_cast(data), length, dest, T()); + + if (opt_swap) + { + for (typename T::value_type i = dest; i != end; ++i) + *i = endian_swap(*i); + } + + return static_cast(end - dest) * sizeof(*dest); + } + +#ifdef PUGIXML_WCHAR_MODE + PUGI__FN size_t get_valid_length(const char_t* data, size_t length) + { + if (length < 1) return 0; + + // discard last character if it's the lead of a surrogate pair + return (sizeof(wchar_t) == 2 && static_cast(static_cast(data[length - 1]) - 0xD800) < 0x400) ? 
length - 1 : length; + } + + PUGI__FN size_t convert_buffer_output(char_t* r_char, uint8_t* r_u8, uint16_t* r_u16, uint32_t* r_u32, const char_t* data, size_t length, xml_encoding encoding) + { + // only endian-swapping is required + if (need_endian_swap_utf(encoding, get_wchar_encoding())) + { + convert_wchar_endian_swap(r_char, data, length); + + return length * sizeof(char_t); + } + + // convert to utf8 + if (encoding == encoding_utf8) + return convert_buffer_output_generic(r_u8, data, length, wchar_decoder(), utf8_writer()); + + // convert to utf16 + if (encoding == encoding_utf16_be || encoding == encoding_utf16_le) + { + xml_encoding native_encoding = is_little_endian() ? encoding_utf16_le : encoding_utf16_be; + + return convert_buffer_output_generic(r_u16, data, length, wchar_decoder(), utf16_writer(), native_encoding != encoding); + } + + // convert to utf32 + if (encoding == encoding_utf32_be || encoding == encoding_utf32_le) + { + xml_encoding native_encoding = is_little_endian() ? 
encoding_utf32_le : encoding_utf32_be; + + return convert_buffer_output_generic(r_u32, data, length, wchar_decoder(), utf32_writer(), native_encoding != encoding); + } + + // convert to latin1 + if (encoding == encoding_latin1) + return convert_buffer_output_generic(r_u8, data, length, wchar_decoder(), latin1_writer()); + + assert(false && "Invalid encoding"); // unreachable + return 0; + } +#else + PUGI__FN size_t get_valid_length(const char_t* data, size_t length) + { + if (length < 5) return 0; + + for (size_t i = 1; i <= 4; ++i) + { + uint8_t ch = static_cast(data[length - i]); + + // either a standalone character or a leading one + if ((ch & 0xc0) != 0x80) return length - i; + } + + // there are four non-leading characters at the end, sequence tail is broken so might as well process the whole chunk + return length; + } + + PUGI__FN size_t convert_buffer_output(char_t* /* r_char */, uint8_t* r_u8, uint16_t* r_u16, uint32_t* r_u32, const char_t* data, size_t length, xml_encoding encoding) + { + if (encoding == encoding_utf16_be || encoding == encoding_utf16_le) + { + xml_encoding native_encoding = is_little_endian() ? encoding_utf16_le : encoding_utf16_be; + + return convert_buffer_output_generic(r_u16, data, length, utf8_decoder(), utf16_writer(), native_encoding != encoding); + } + + if (encoding == encoding_utf32_be || encoding == encoding_utf32_le) + { + xml_encoding native_encoding = is_little_endian() ? 
encoding_utf32_le : encoding_utf32_be; + + return convert_buffer_output_generic(r_u32, data, length, utf8_decoder(), utf32_writer(), native_encoding != encoding); + } + + if (encoding == encoding_latin1) + return convert_buffer_output_generic(r_u8, data, length, utf8_decoder(), latin1_writer()); + + assert(false && "Invalid encoding"); // unreachable + return 0; + } +#endif + + class xml_buffered_writer + { + xml_buffered_writer(const xml_buffered_writer&); + xml_buffered_writer& operator=(const xml_buffered_writer&); + + public: + xml_buffered_writer(xml_writer& writer_, xml_encoding user_encoding): writer(writer_), bufsize(0), encoding(get_write_encoding(user_encoding)) + { + PUGI__STATIC_ASSERT(bufcapacity >= 8); + } + + size_t flush() + { + flush(buffer, bufsize); + bufsize = 0; + return 0; + } + + void flush(const char_t* data, size_t size) + { + if (size == 0) return; + + // fast path, just write data + if (encoding == get_write_native_encoding()) + writer.write(data, size * sizeof(char_t)); + else + { + // convert chunk + size_t result = convert_buffer_output(scratch.data_char, scratch.data_u8, scratch.data_u16, scratch.data_u32, data, size, encoding); + assert(result <= sizeof(scratch)); + + // write data + writer.write(scratch.data_u8, result); + } + } + + void write_direct(const char_t* data, size_t length) + { + // flush the remaining buffer contents + flush(); + + // handle large chunks + if (length > bufcapacity) + { + if (encoding == get_write_native_encoding()) + { + // fast path, can just write data chunk + writer.write(data, length * sizeof(char_t)); + return; + } + + // need to convert in suitable chunks + while (length > bufcapacity) + { + // get chunk size by selecting such number of characters that are guaranteed to fit into scratch buffer + // and form a complete codepoint sequence (i.e. 
discard start of last codepoint if necessary) + size_t chunk_size = get_valid_length(data, bufcapacity); + assert(chunk_size); + + // convert chunk and write + flush(data, chunk_size); + + // iterate + data += chunk_size; + length -= chunk_size; + } + + // small tail is copied below + bufsize = 0; + } + + memcpy(buffer + bufsize, data, length * sizeof(char_t)); + bufsize += length; + } + + void write_buffer(const char_t* data, size_t length) + { + size_t offset = bufsize; + + if (offset + length <= bufcapacity) + { + memcpy(buffer + offset, data, length * sizeof(char_t)); + bufsize = offset + length; + } + else + { + write_direct(data, length); + } + } + + void write_string(const char_t* data) + { + // write the part of the string that fits in the buffer + size_t offset = bufsize; + + while (*data && offset < bufcapacity) + buffer[offset++] = *data++; + + // write the rest + if (offset < bufcapacity) + { + bufsize = offset; + } + else + { + // backtrack a bit if we have split the codepoint + size_t length = offset - bufsize; + size_t extra = length - get_valid_length(data - length, length); + + bufsize = offset - extra; + + write_direct(data - extra, strlength(data) + extra); + } + } + + void write(char_t d0) + { + size_t offset = bufsize; + if (offset > bufcapacity - 1) offset = flush(); + + buffer[offset + 0] = d0; + bufsize = offset + 1; + } + + void write(char_t d0, char_t d1) + { + size_t offset = bufsize; + if (offset > bufcapacity - 2) offset = flush(); + + buffer[offset + 0] = d0; + buffer[offset + 1] = d1; + bufsize = offset + 2; + } + + void write(char_t d0, char_t d1, char_t d2) + { + size_t offset = bufsize; + if (offset > bufcapacity - 3) offset = flush(); + + buffer[offset + 0] = d0; + buffer[offset + 1] = d1; + buffer[offset + 2] = d2; + bufsize = offset + 3; + } + + void write(char_t d0, char_t d1, char_t d2, char_t d3) + { + size_t offset = bufsize; + if (offset > bufcapacity - 4) offset = flush(); + + buffer[offset + 0] = d0; + buffer[offset + 1] 
= d1; + buffer[offset + 2] = d2; + buffer[offset + 3] = d3; + bufsize = offset + 4; + } + + void write(char_t d0, char_t d1, char_t d2, char_t d3, char_t d4) + { + size_t offset = bufsize; + if (offset > bufcapacity - 5) offset = flush(); + + buffer[offset + 0] = d0; + buffer[offset + 1] = d1; + buffer[offset + 2] = d2; + buffer[offset + 3] = d3; + buffer[offset + 4] = d4; + bufsize = offset + 5; + } + + void write(char_t d0, char_t d1, char_t d2, char_t d3, char_t d4, char_t d5) + { + size_t offset = bufsize; + if (offset > bufcapacity - 6) offset = flush(); + + buffer[offset + 0] = d0; + buffer[offset + 1] = d1; + buffer[offset + 2] = d2; + buffer[offset + 3] = d3; + buffer[offset + 4] = d4; + buffer[offset + 5] = d5; + bufsize = offset + 6; + } + + // utf8 maximum expansion: x4 (-> utf32) + // utf16 maximum expansion: x2 (-> utf32) + // utf32 maximum expansion: x1 + enum + { + bufcapacitybytes = + #ifdef PUGIXML_MEMORY_OUTPUT_STACK + PUGIXML_MEMORY_OUTPUT_STACK + #else + 10240 + #endif + , + bufcapacity = bufcapacitybytes / (sizeof(char_t) + 4) + }; + + char_t buffer[bufcapacity]; + + union + { + uint8_t data_u8[4 * bufcapacity]; + uint16_t data_u16[2 * bufcapacity]; + uint32_t data_u32[bufcapacity]; + char_t data_char[bufcapacity]; + } scratch; + + xml_writer& writer; + size_t bufsize; + xml_encoding encoding; + }; + + PUGI__FN void text_output_escaped(xml_buffered_writer& writer, const char_t* s, chartypex_t type, unsigned int flags) + { + while (*s) + { + const char_t* prev = s; + + // While *s is a usual symbol + PUGI__SCANWHILE_UNROLL(!PUGI__IS_CHARTYPEX(ss, type)); + + writer.write_buffer(prev, static_cast(s - prev)); + + switch (*s) + { + case 0: break; + case '&': + writer.write('&', 'a', 'm', 'p', ';'); + ++s; + break; + case '<': + writer.write('&', 'l', 't', ';'); + ++s; + break; + case '>': + writer.write('&', 'g', 't', ';'); + ++s; + break; + case '"': + if (flags & format_attribute_single_quote) + writer.write('"'); + else + writer.write('&', 'q', 
'u', 'o', 't', ';'); + ++s; + break; + case '\'': + if (flags & format_attribute_single_quote) + writer.write('&', 'a', 'p', 'o', 's', ';'); + else + writer.write('\''); + ++s; + break; + default: // s is not a usual symbol + { + unsigned int ch = static_cast(*s++); + assert(ch < 32); + + if (!(flags & format_skip_control_chars)) + writer.write('&', '#', static_cast((ch / 10) + '0'), static_cast((ch % 10) + '0'), ';'); + } + } + } + } + + PUGI__FN void text_output(xml_buffered_writer& writer, const char_t* s, chartypex_t type, unsigned int flags) + { + if (flags & format_no_escapes) + writer.write_string(s); + else + text_output_escaped(writer, s, type, flags); + } + + PUGI__FN void text_output_cdata(xml_buffered_writer& writer, const char_t* s) + { + do + { + writer.write('<', '!', '[', 'C', 'D'); + writer.write('A', 'T', 'A', '['); + + const char_t* prev = s; + + // look for ]]> sequence - we can't output it as is since it terminates CDATA + while (*s && !(s[0] == ']' && s[1] == ']' && s[2] == '>')) ++s; + + // skip ]] if we stopped at ]]>, > will go to the next CDATA section + if (*s) s += 2; + + writer.write_buffer(prev, static_cast(s - prev)); + + writer.write(']', ']', '>'); + } + while (*s); + } + + PUGI__FN void text_output_indent(xml_buffered_writer& writer, const char_t* indent, size_t indent_length, unsigned int depth) + { + switch (indent_length) + { + case 1: + { + for (unsigned int i = 0; i < depth; ++i) + writer.write(indent[0]); + break; + } + + case 2: + { + for (unsigned int i = 0; i < depth; ++i) + writer.write(indent[0], indent[1]); + break; + } + + case 3: + { + for (unsigned int i = 0; i < depth; ++i) + writer.write(indent[0], indent[1], indent[2]); + break; + } + + case 4: + { + for (unsigned int i = 0; i < depth; ++i) + writer.write(indent[0], indent[1], indent[2], indent[3]); + break; + } + + default: + { + for (unsigned int i = 0; i < depth; ++i) + writer.write_buffer(indent, indent_length); + } + } + } + + PUGI__FN void 
node_output_comment(xml_buffered_writer& writer, const char_t* s) + { + writer.write('<', '!', '-', '-'); + + while (*s) + { + const char_t* prev = s; + + // look for -\0 or -- sequence - we can't output it since -- is illegal in comment body + while (*s && !(s[0] == '-' && (s[1] == '-' || s[1] == 0))) ++s; + + writer.write_buffer(prev, static_cast(s - prev)); + + if (*s) + { + assert(*s == '-'); + + writer.write('-', ' '); + ++s; + } + } + + writer.write('-', '-', '>'); + } + + PUGI__FN void node_output_pi_value(xml_buffered_writer& writer, const char_t* s) + { + while (*s) + { + const char_t* prev = s; + + // look for ?> sequence - we can't output it since ?> terminates PI + while (*s && !(s[0] == '?' && s[1] == '>')) ++s; + + writer.write_buffer(prev, static_cast(s - prev)); + + if (*s) + { + assert(s[0] == '?' && s[1] == '>'); + + writer.write('?', ' ', '>'); + s += 2; + } + } + } + + PUGI__FN void node_output_attributes(xml_buffered_writer& writer, xml_node_struct* node, const char_t* indent, size_t indent_length, unsigned int flags, unsigned int depth) + { + const char_t* default_name = PUGIXML_TEXT(":anonymous"); + const char_t enquotation_char = (flags & format_attribute_single_quote) ? '\'' : '"'; + + for (xml_attribute_struct* a = node->first_attribute; a; a = a->next_attribute) + { + if ((flags & (format_indent_attributes | format_raw)) == format_indent_attributes) + { + writer.write('\n'); + + text_output_indent(writer, indent, indent_length, depth + 1); + } + else + { + writer.write(' '); + } + + writer.write_string(a->name ? 
a->name + 0 : default_name); + writer.write('=', enquotation_char); + + if (a->value) + text_output(writer, a->value, ctx_special_attr, flags); + + writer.write(enquotation_char); + } + } + + PUGI__FN bool node_output_start(xml_buffered_writer& writer, xml_node_struct* node, const char_t* indent, size_t indent_length, unsigned int flags, unsigned int depth) + { + const char_t* default_name = PUGIXML_TEXT(":anonymous"); + const char_t* name = node->name ? node->name + 0 : default_name; + + writer.write('<'); + writer.write_string(name); + + if (node->first_attribute) + node_output_attributes(writer, node, indent, indent_length, flags, depth); + + // element nodes can have value if parse_embed_pcdata was used + if (!node->value) + { + if (!node->first_child) + { + if (flags & format_no_empty_element_tags) + { + writer.write('>', '<', '/'); + writer.write_string(name); + writer.write('>'); + + return false; + } + else + { + if ((flags & format_raw) == 0) + writer.write(' '); + + writer.write('/', '>'); + + return false; + } + } + else + { + writer.write('>'); + + return true; + } + } + else + { + writer.write('>'); + + text_output(writer, node->value, ctx_special_pcdata, flags); + + if (!node->first_child) + { + writer.write('<', '/'); + writer.write_string(name); + writer.write('>'); + + return false; + } + else + { + return true; + } + } + } + + PUGI__FN void node_output_end(xml_buffered_writer& writer, xml_node_struct* node) + { + const char_t* default_name = PUGIXML_TEXT(":anonymous"); + const char_t* name = node->name ? node->name + 0 : default_name; + + writer.write('<', '/'); + writer.write_string(name); + writer.write('>'); + } + + PUGI__FN void node_output_simple(xml_buffered_writer& writer, xml_node_struct* node, unsigned int flags) + { + const char_t* default_name = PUGIXML_TEXT(":anonymous"); + + switch (PUGI__NODETYPE(node)) + { + case node_pcdata: + text_output(writer, node->value ? 
node->value + 0 : PUGIXML_TEXT(""), ctx_special_pcdata, flags); + break; + + case node_cdata: + text_output_cdata(writer, node->value ? node->value + 0 : PUGIXML_TEXT("")); + break; + + case node_comment: + node_output_comment(writer, node->value ? node->value + 0 : PUGIXML_TEXT("")); + break; + + case node_pi: + writer.write('<', '?'); + writer.write_string(node->name ? node->name + 0 : default_name); + + if (node->value) + { + writer.write(' '); + node_output_pi_value(writer, node->value); + } + + writer.write('?', '>'); + break; + + case node_declaration: + writer.write('<', '?'); + writer.write_string(node->name ? node->name + 0 : default_name); + node_output_attributes(writer, node, PUGIXML_TEXT(""), 0, flags | format_raw, 0); + writer.write('?', '>'); + break; + + case node_doctype: + writer.write('<', '!', 'D', 'O', 'C'); + writer.write('T', 'Y', 'P', 'E'); + + if (node->value) + { + writer.write(' '); + writer.write_string(node->value); + } + + writer.write('>'); + break; + + default: + assert(false && "Invalid node type"); // unreachable + } + } + + enum indent_flags_t + { + indent_newline = 1, + indent_indent = 2 + }; + + PUGI__FN void node_output(xml_buffered_writer& writer, xml_node_struct* root, const char_t* indent, unsigned int flags, unsigned int depth) + { + size_t indent_length = ((flags & (format_indent | format_indent_attributes)) && (flags & format_raw) == 0) ? 
strlength(indent) : 0; + unsigned int indent_flags = indent_indent; + + xml_node_struct* node = root; + + do + { + assert(node); + + // begin writing current node + if (PUGI__NODETYPE(node) == node_pcdata || PUGI__NODETYPE(node) == node_cdata) + { + node_output_simple(writer, node, flags); + + indent_flags = 0; + } + else + { + if ((indent_flags & indent_newline) && (flags & format_raw) == 0) + writer.write('\n'); + + if ((indent_flags & indent_indent) && indent_length) + text_output_indent(writer, indent, indent_length, depth); + + if (PUGI__NODETYPE(node) == node_element) + { + indent_flags = indent_newline | indent_indent; + + if (node_output_start(writer, node, indent, indent_length, flags, depth)) + { + // element nodes can have value if parse_embed_pcdata was used + if (node->value) + indent_flags = 0; + + node = node->first_child; + depth++; + continue; + } + } + else if (PUGI__NODETYPE(node) == node_document) + { + indent_flags = indent_indent; + + if (node->first_child) + { + node = node->first_child; + continue; + } + } + else + { + node_output_simple(writer, node, flags); + + indent_flags = indent_newline | indent_indent; + } + } + + // continue to the next node + while (node != root) + { + if (node->next_sibling) + { + node = node->next_sibling; + break; + } + + node = node->parent; + + // write closing node + if (PUGI__NODETYPE(node) == node_element) + { + depth--; + + if ((indent_flags & indent_newline) && (flags & format_raw) == 0) + writer.write('\n'); + + if ((indent_flags & indent_indent) && indent_length) + text_output_indent(writer, indent, indent_length, depth); + + node_output_end(writer, node); + + indent_flags = indent_newline | indent_indent; + } + } + } + while (node != root); + + if ((indent_flags & indent_newline) && (flags & format_raw) == 0) + writer.write('\n'); + } + + PUGI__FN bool has_declaration(xml_node_struct* node) + { + for (xml_node_struct* child = node->first_child; child; child = child->next_sibling) + { + xml_node_type 
type = PUGI__NODETYPE(child); + + if (type == node_declaration) return true; + if (type == node_element) return false; + } + + return false; + } + + PUGI__FN bool is_attribute_of(xml_attribute_struct* attr, xml_node_struct* node) + { + for (xml_attribute_struct* a = node->first_attribute; a; a = a->next_attribute) + if (a == attr) + return true; + + return false; + } + + PUGI__FN bool allow_insert_attribute(xml_node_type parent) + { + return parent == node_element || parent == node_declaration; + } + + PUGI__FN bool allow_insert_child(xml_node_type parent, xml_node_type child) + { + if (parent != node_document && parent != node_element) return false; + if (child == node_document || child == node_null) return false; + if (parent != node_document && (child == node_declaration || child == node_doctype)) return false; + + return true; + } + + PUGI__FN bool allow_move(xml_node parent, xml_node child) + { + // check that child can be a child of parent + if (!allow_insert_child(parent.type(), child.type())) + return false; + + // check that node is not moved between documents + if (parent.root() != child.root()) + return false; + + // check that new parent is not in the child subtree + xml_node cur = parent; + + while (cur) + { + if (cur == child) + return false; + + cur = cur.parent(); + } + + return true; + } + + template + PUGI__FN void node_copy_string(String& dest, Header& header, uintptr_t header_mask, char_t* source, Header& source_header, xml_allocator* alloc) + { + assert(!dest && (header & header_mask) == 0); + + if (source) + { + if (alloc && (source_header & header_mask) == 0) + { + dest = source; + + // since strcpy_insitu can reuse document buffer memory we need to mark both source and dest as shared + header |= xml_memory_page_contents_shared_mask; + source_header |= xml_memory_page_contents_shared_mask; + } + else + strcpy_insitu(dest, header, header_mask, source, strlength(source)); + } + } + + PUGI__FN void node_copy_contents(xml_node_struct* dn, 
xml_node_struct* sn, xml_allocator* shared_alloc) + { + node_copy_string(dn->name, dn->header, xml_memory_page_name_allocated_mask, sn->name, sn->header, shared_alloc); + node_copy_string(dn->value, dn->header, xml_memory_page_value_allocated_mask, sn->value, sn->header, shared_alloc); + + for (xml_attribute_struct* sa = sn->first_attribute; sa; sa = sa->next_attribute) + { + xml_attribute_struct* da = append_new_attribute(dn, get_allocator(dn)); + + if (da) + { + node_copy_string(da->name, da->header, xml_memory_page_name_allocated_mask, sa->name, sa->header, shared_alloc); + node_copy_string(da->value, da->header, xml_memory_page_value_allocated_mask, sa->value, sa->header, shared_alloc); + } + } + } + + PUGI__FN void node_copy_tree(xml_node_struct* dn, xml_node_struct* sn) + { + xml_allocator& alloc = get_allocator(dn); + xml_allocator* shared_alloc = (&alloc == &get_allocator(sn)) ? &alloc : 0; + + node_copy_contents(dn, sn, shared_alloc); + + xml_node_struct* dit = dn; + xml_node_struct* sit = sn->first_child; + + while (sit && sit != sn) + { + // loop invariant: dit is inside the subtree rooted at dn + assert(dit); + + // when a tree is copied into one of the descendants, we need to skip that subtree to avoid an infinite loop + if (sit != dn) + { + xml_node_struct* copy = append_new_node(dit, alloc, PUGI__NODETYPE(sit)); + + if (copy) + { + node_copy_contents(copy, sit, shared_alloc); + + if (sit->first_child) + { + dit = copy; + sit = sit->first_child; + continue; + } + } + } + + // continue to the next node + do + { + if (sit->next_sibling) + { + sit = sit->next_sibling; + break; + } + + sit = sit->parent; + dit = dit->parent; + + // loop invariant: dit is inside the subtree rooted at dn while sit is inside sn + assert(sit == sn || dit); + } + while (sit != sn); + } + + assert(!sit || dit == dn->parent); + } + + PUGI__FN void node_copy_attribute(xml_attribute_struct* da, xml_attribute_struct* sa) + { + xml_allocator& alloc = get_allocator(da); + 
xml_allocator* shared_alloc = (&alloc == &get_allocator(sa)) ? &alloc : 0; + + node_copy_string(da->name, da->header, xml_memory_page_name_allocated_mask, sa->name, sa->header, shared_alloc); + node_copy_string(da->value, da->header, xml_memory_page_value_allocated_mask, sa->value, sa->header, shared_alloc); + } + + inline bool is_text_node(xml_node_struct* node) + { + xml_node_type type = PUGI__NODETYPE(node); + + return type == node_pcdata || type == node_cdata; + } + + // get value with conversion functions + template PUGI__FN PUGI__UNSIGNED_OVERFLOW U string_to_integer(const char_t* value, U minv, U maxv) + { + U result = 0; + const char_t* s = value; + + while (PUGI__IS_CHARTYPE(*s, ct_space)) + s++; + + bool negative = (*s == '-'); + + s += (*s == '+' || *s == '-'); + + bool overflow = false; + + if (s[0] == '0' && (s[1] | ' ') == 'x') + { + s += 2; + + // since overflow detection relies on length of the sequence skip leading zeros + while (*s == '0') + s++; + + const char_t* start = s; + + for (;;) + { + if (static_cast(*s - '0') < 10) + result = result * 16 + (*s - '0'); + else if (static_cast((*s | ' ') - 'a') < 6) + result = result * 16 + ((*s | ' ') - 'a' + 10); + else + break; + + s++; + } + + size_t digits = static_cast(s - start); + + overflow = digits > sizeof(U) * 2; + } + else + { + // since overflow detection relies on length of the sequence skip leading zeros + while (*s == '0') + s++; + + const char_t* start = s; + + for (;;) + { + if (static_cast(*s - '0') < 10) + result = result * 10 + (*s - '0'); + else + break; + + s++; + } + + size_t digits = static_cast(s - start); + + PUGI__STATIC_ASSERT(sizeof(U) == 8 || sizeof(U) == 4 || sizeof(U) == 2); + + const size_t max_digits10 = sizeof(U) == 8 ? 20 : sizeof(U) == 4 ? 10 : 5; + const char_t max_lead = sizeof(U) == 8 ? '1' : sizeof(U) == 4 ? 
'4' : '6'; + const size_t high_bit = sizeof(U) * 8 - 1; + + overflow = digits >= max_digits10 && !(digits == max_digits10 && (*start < max_lead || (*start == max_lead && result >> high_bit))); + } + + if (negative) + { + // Workaround for crayc++ CC-3059: Expected no overflow in routine. + #ifdef _CRAYC + return (overflow || result > ~minv + 1) ? minv : ~result + 1; + #else + return (overflow || result > 0 - minv) ? minv : 0 - result; + #endif + } + else + return (overflow || result > maxv) ? maxv : result; + } + + PUGI__FN int get_value_int(const char_t* value) + { + return string_to_integer(value, static_cast(INT_MIN), INT_MAX); + } + + PUGI__FN unsigned int get_value_uint(const char_t* value) + { + return string_to_integer(value, 0, UINT_MAX); + } + + PUGI__FN double get_value_double(const char_t* value) + { + #ifdef PUGIXML_WCHAR_MODE + return wcstod(value, 0); + #else + return strtod(value, 0); + #endif + } + + PUGI__FN float get_value_float(const char_t* value) + { + #ifdef PUGIXML_WCHAR_MODE + return static_cast(wcstod(value, 0)); + #else + return static_cast(strtod(value, 0)); + #endif + } + + PUGI__FN bool get_value_bool(const char_t* value) + { + // only look at first char + char_t first = *value; + + // 1*, t* (true), T* (True), y* (yes), Y* (YES) + return (first == '1' || first == 't' || first == 'T' || first == 'y' || first == 'Y'); + } + +#ifdef PUGIXML_HAS_LONG_LONG + PUGI__FN long long get_value_llong(const char_t* value) + { + return string_to_integer(value, static_cast(LLONG_MIN), LLONG_MAX); + } + + PUGI__FN unsigned long long get_value_ullong(const char_t* value) + { + return string_to_integer(value, 0, ULLONG_MAX); + } +#endif + + template PUGI__FN PUGI__UNSIGNED_OVERFLOW char_t* integer_to_string(char_t* begin, char_t* end, U value, bool negative) + { + char_t* result = end - 1; + U rest = negative ? 
0 - value : value; + + do + { + *result-- = static_cast('0' + (rest % 10)); + rest /= 10; + } + while (rest); + + assert(result >= begin); + (void)begin; + + *result = '-'; + + return result + !negative; + } + + // set value with conversion functions + template + PUGI__FN bool set_value_ascii(String& dest, Header& header, uintptr_t header_mask, char* buf) + { + #ifdef PUGIXML_WCHAR_MODE + char_t wbuf[128]; + assert(strlen(buf) < sizeof(wbuf) / sizeof(wbuf[0])); + + size_t offset = 0; + for (; buf[offset]; ++offset) wbuf[offset] = buf[offset]; + + return strcpy_insitu(dest, header, header_mask, wbuf, offset); + #else + return strcpy_insitu(dest, header, header_mask, buf, strlen(buf)); + #endif + } + + template + PUGI__FN bool set_value_integer(String& dest, Header& header, uintptr_t header_mask, U value, bool negative) + { + char_t buf[64]; + char_t* end = buf + sizeof(buf) / sizeof(buf[0]); + char_t* begin = integer_to_string(buf, end, value, negative); + + return strcpy_insitu(dest, header, header_mask, begin, end - begin); + } + + template + PUGI__FN bool set_value_convert(String& dest, Header& header, uintptr_t header_mask, float value, int precision) + { + char buf[128]; + PUGI__SNPRINTF(buf, "%.*g", precision, double(value)); + + return set_value_ascii(dest, header, header_mask, buf); + } + + template + PUGI__FN bool set_value_convert(String& dest, Header& header, uintptr_t header_mask, double value, int precision) + { + char buf[128]; + PUGI__SNPRINTF(buf, "%.*g", precision, value); + + return set_value_ascii(dest, header, header_mask, buf); + } + + template + PUGI__FN bool set_value_bool(String& dest, Header& header, uintptr_t header_mask, bool value) + { + return strcpy_insitu(dest, header, header_mask, value ? PUGIXML_TEXT("true") : PUGIXML_TEXT("false"), value ? 
4 : 5); + } + + PUGI__FN xml_parse_result load_buffer_impl(xml_document_struct* doc, xml_node_struct* root, void* contents, size_t size, unsigned int options, xml_encoding encoding, bool is_mutable, bool own, char_t** out_buffer) + { + // check input buffer + if (!contents && size) return make_parse_result(status_io_error); + + // get actual encoding + xml_encoding buffer_encoding = impl::get_buffer_encoding(encoding, contents, size); + + // get private buffer + char_t* buffer = 0; + size_t length = 0; + + // coverity[var_deref_model] + if (!impl::convert_buffer(buffer, length, buffer_encoding, contents, size, is_mutable)) return impl::make_parse_result(status_out_of_memory); + + // delete original buffer if we performed a conversion + if (own && buffer != contents && contents) impl::xml_memory::deallocate(contents); + + // grab onto buffer if it's our buffer, user is responsible for deallocating contents himself + if (own || buffer != contents) *out_buffer = buffer; + + // store buffer for offset_debug + doc->buffer = buffer; + + // parse + xml_parse_result res = impl::xml_parser::parse(buffer, length, doc, root, options); + + // remember encoding + res.encoding = buffer_encoding; + + return res; + } + + // we need to get length of entire file to load it in memory; the only (relatively) sane way to do it is via seek/tell trick + PUGI__FN xml_parse_status get_file_size(FILE* file, size_t& out_result) + { + #if defined(PUGI__MSVC_CRT_VERSION) && PUGI__MSVC_CRT_VERSION >= 1400 + // there are 64-bit versions of fseek/ftell, let's use them + typedef __int64 length_type; + + _fseeki64(file, 0, SEEK_END); + length_type length = _ftelli64(file); + _fseeki64(file, 0, SEEK_SET); + #elif defined(__MINGW32__) && !defined(__NO_MINGW_LFS) && (!defined(__STRICT_ANSI__) || defined(__MINGW64_VERSION_MAJOR)) + // there are 64-bit versions of fseek/ftell, let's use them + typedef off64_t length_type; + + fseeko64(file, 0, SEEK_END); + length_type length = ftello64(file); + 
fseeko64(file, 0, SEEK_SET); + #else + // if this is a 32-bit OS, long is enough; if this is a unix system, long is 64-bit, which is enough; otherwise we can't do anything anyway. + typedef long length_type; + + fseek(file, 0, SEEK_END); + length_type length = ftell(file); + fseek(file, 0, SEEK_SET); + #endif + + // check for I/O errors + if (length < 0) return status_io_error; + + // check for overflow + size_t result = static_cast(length); + + if (static_cast(result) != length) return status_out_of_memory; + + // finalize + out_result = result; + + return status_ok; + } + + // This function assumes that buffer has extra sizeof(char_t) writable bytes after size + PUGI__FN size_t zero_terminate_buffer(void* buffer, size_t size, xml_encoding encoding) + { + // We only need to zero-terminate if encoding conversion does not do it for us + #ifdef PUGIXML_WCHAR_MODE + xml_encoding wchar_encoding = get_wchar_encoding(); + + if (encoding == wchar_encoding || need_endian_swap_utf(encoding, wchar_encoding)) + { + size_t length = size / sizeof(char_t); + + static_cast(buffer)[length] = 0; + return (length + 1) * sizeof(char_t); + } + #else + if (encoding == encoding_utf8) + { + static_cast(buffer)[size] = 0; + return size + 1; + } + #endif + + return size; + } + + PUGI__FN xml_parse_result load_file_impl(xml_document_struct* doc, FILE* file, unsigned int options, xml_encoding encoding, char_t** out_buffer) + { + if (!file) return make_parse_result(status_file_not_found); + + // get file size (can result in I/O errors) + size_t size = 0; + xml_parse_status size_status = get_file_size(file, size); + if (size_status != status_ok) return make_parse_result(size_status); + + size_t max_suffix_size = sizeof(char_t); + + // allocate buffer for the whole file + char* contents = static_cast(xml_memory::allocate(size + max_suffix_size)); + if (!contents) return make_parse_result(status_out_of_memory); + + // read file in memory + size_t read_size = fread(contents, 1, size, file); + + 
if (read_size != size) + { + xml_memory::deallocate(contents); + return make_parse_result(status_io_error); + } + + xml_encoding real_encoding = get_buffer_encoding(encoding, contents, size); + + return load_buffer_impl(doc, doc, contents, zero_terminate_buffer(contents, size, real_encoding), options, real_encoding, true, true, out_buffer); + } + + PUGI__FN void close_file(FILE* file) + { + fclose(file); + } + +#ifndef PUGIXML_NO_STL + template struct xml_stream_chunk + { + static xml_stream_chunk* create() + { + void* memory = xml_memory::allocate(sizeof(xml_stream_chunk)); + if (!memory) return 0; + + return new (memory) xml_stream_chunk(); + } + + static void destroy(xml_stream_chunk* chunk) + { + // free chunk chain + while (chunk) + { + xml_stream_chunk* next_ = chunk->next; + + xml_memory::deallocate(chunk); + + chunk = next_; + } + } + + xml_stream_chunk(): next(0), size(0) + { + } + + xml_stream_chunk* next; + size_t size; + + T data[xml_memory_page_size / sizeof(T)]; + }; + + template PUGI__FN xml_parse_status load_stream_data_noseek(std::basic_istream& stream, void** out_buffer, size_t* out_size) + { + auto_deleter > chunks(0, xml_stream_chunk::destroy); + + // read file to a chunk list + size_t total = 0; + xml_stream_chunk* last = 0; + + while (!stream.eof()) + { + // allocate new chunk + xml_stream_chunk* chunk = xml_stream_chunk::create(); + if (!chunk) return status_out_of_memory; + + // append chunk to list + if (last) last = last->next = chunk; + else chunks.data = last = chunk; + + // read data to chunk + stream.read(chunk->data, static_cast(sizeof(chunk->data) / sizeof(T))); + chunk->size = static_cast(stream.gcount()) * sizeof(T); + + // read may set failbit | eofbit in case gcount() is less than read length, so check for other I/O errors + if (stream.bad() || (!stream.eof() && stream.fail())) return status_io_error; + + // guard against huge files (chunk size is small enough to make this overflow check work) + if (total + chunk->size < total) 
return status_out_of_memory; + total += chunk->size; + } + + size_t max_suffix_size = sizeof(char_t); + + // copy chunk list to a contiguous buffer + char* buffer = static_cast(xml_memory::allocate(total + max_suffix_size)); + if (!buffer) return status_out_of_memory; + + char* write = buffer; + + for (xml_stream_chunk* chunk = chunks.data; chunk; chunk = chunk->next) + { + assert(write + chunk->size <= buffer + total); + memcpy(write, chunk->data, chunk->size); + write += chunk->size; + } + + assert(write == buffer + total); + + // return buffer + *out_buffer = buffer; + *out_size = total; + + return status_ok; + } + + template PUGI__FN xml_parse_status load_stream_data_seek(std::basic_istream& stream, void** out_buffer, size_t* out_size) + { + // get length of remaining data in stream + typename std::basic_istream::pos_type pos = stream.tellg(); + stream.seekg(0, std::ios::end); + std::streamoff length = stream.tellg() - pos; + stream.seekg(pos); + + if (stream.fail() || pos < 0) return status_io_error; + + // guard against huge files + size_t read_length = static_cast(length); + + if (static_cast(read_length) != length || length < 0) return status_out_of_memory; + + size_t max_suffix_size = sizeof(char_t); + + // read stream data into memory (guard against stream exceptions with buffer holder) + auto_deleter buffer(xml_memory::allocate(read_length * sizeof(T) + max_suffix_size), xml_memory::deallocate); + if (!buffer.data) return status_out_of_memory; + + stream.read(static_cast(buffer.data), static_cast(read_length)); + + // read may set failbit | eofbit in case gcount() is less than read_length (i.e. 
line ending conversion), so check for other I/O errors + if (stream.bad() || (!stream.eof() && stream.fail())) return status_io_error; + + // return buffer + size_t actual_length = static_cast(stream.gcount()); + assert(actual_length <= read_length); + + *out_buffer = buffer.release(); + *out_size = actual_length * sizeof(T); + + return status_ok; + } + + template PUGI__FN xml_parse_result load_stream_impl(xml_document_struct* doc, std::basic_istream& stream, unsigned int options, xml_encoding encoding, char_t** out_buffer) + { + void* buffer = 0; + size_t size = 0; + xml_parse_status status = status_ok; + + // if stream has an error bit set, bail out (otherwise tellg() can fail and we'll clear error bits) + if (stream.fail()) return make_parse_result(status_io_error); + + // load stream to memory (using seek-based implementation if possible, since it's faster and takes less memory) + if (stream.tellg() < 0) + { + stream.clear(); // clear error flags that could be set by a failing tellg + status = load_stream_data_noseek(stream, &buffer, &size); + } + else + status = load_stream_data_seek(stream, &buffer, &size); + + if (status != status_ok) return make_parse_result(status); + + xml_encoding real_encoding = get_buffer_encoding(encoding, buffer, size); + + return load_buffer_impl(doc, doc, buffer, zero_terminate_buffer(buffer, size, real_encoding), options, real_encoding, true, true, out_buffer); + } +#endif + +#if defined(PUGI__MSVC_CRT_VERSION) || defined(__BORLANDC__) || (defined(__MINGW32__) && (!defined(__STRICT_ANSI__) || defined(__MINGW64_VERSION_MAJOR))) + PUGI__FN FILE* open_file_wide(const wchar_t* path, const wchar_t* mode) + { +#if defined(PUGI__MSVC_CRT_VERSION) && PUGI__MSVC_CRT_VERSION >= 1400 + FILE* file = 0; + return _wfopen_s(&file, path, mode) == 0 ? 
file : 0; +#else + return _wfopen(path, mode); +#endif + } +#else + PUGI__FN char* convert_path_heap(const wchar_t* str) + { + assert(str); + + // first pass: get length in utf8 characters + size_t length = strlength_wide(str); + size_t size = as_utf8_begin(str, length); + + // allocate resulting string + char* result = static_cast(xml_memory::allocate(size + 1)); + if (!result) return 0; + + // second pass: convert to utf8 + as_utf8_end(result, size, str, length); + + // zero-terminate + result[size] = 0; + + return result; + } + + PUGI__FN FILE* open_file_wide(const wchar_t* path, const wchar_t* mode) + { + // there is no standard function to open wide paths, so our best bet is to try utf8 path + char* path_utf8 = convert_path_heap(path); + if (!path_utf8) return 0; + + // convert mode to ASCII (we mirror _wfopen interface) + char mode_ascii[4] = {0}; + for (size_t i = 0; mode[i]; ++i) mode_ascii[i] = static_cast(mode[i]); + + // try to open the utf8 path + FILE* result = fopen(path_utf8, mode_ascii); + + // free dummy buffer + xml_memory::deallocate(path_utf8); + + return result; + } +#endif + + PUGI__FN FILE* open_file(const char* path, const char* mode) + { +#if defined(PUGI__MSVC_CRT_VERSION) && PUGI__MSVC_CRT_VERSION >= 1400 + FILE* file = 0; + return fopen_s(&file, path, mode) == 0 ? 
file : 0; +#else + return fopen(path, mode); +#endif + } + + PUGI__FN bool save_file_impl(const xml_document& doc, FILE* file, const char_t* indent, unsigned int flags, xml_encoding encoding) + { + if (!file) return false; + + xml_writer_file writer(file); + doc.save(writer, indent, flags, encoding); + + return ferror(file) == 0; + } + + struct name_null_sentry + { + xml_node_struct* node; + char_t* name; + + name_null_sentry(xml_node_struct* node_): node(node_), name(node_->name) + { + node->name = 0; + } + + ~name_null_sentry() + { + node->name = name; + } + }; +PUGI__NS_END + +namespace pugi +{ + PUGI__FN xml_writer_file::xml_writer_file(void* file_): file(file_) + { + } + + PUGI__FN void xml_writer_file::write(const void* data, size_t size) + { + size_t result = fwrite(data, 1, size, static_cast(file)); + (void)!result; // unfortunately we can't do proper error handling here + } + +#ifndef PUGIXML_NO_STL + PUGI__FN xml_writer_stream::xml_writer_stream(std::basic_ostream >& stream): narrow_stream(&stream), wide_stream(0) + { + } + + PUGI__FN xml_writer_stream::xml_writer_stream(std::basic_ostream >& stream): narrow_stream(0), wide_stream(&stream) + { + } + + PUGI__FN void xml_writer_stream::write(const void* data, size_t size) + { + if (narrow_stream) + { + assert(!wide_stream); + narrow_stream->write(reinterpret_cast(data), static_cast(size)); + } + else + { + assert(wide_stream); + assert(size % sizeof(wchar_t) == 0); + + wide_stream->write(reinterpret_cast(data), static_cast(size / sizeof(wchar_t))); + } + } +#endif + + PUGI__FN xml_tree_walker::xml_tree_walker(): _depth(0) + { + } + + PUGI__FN xml_tree_walker::~xml_tree_walker() + { + } + + PUGI__FN int xml_tree_walker::depth() const + { + return _depth; + } + + PUGI__FN bool xml_tree_walker::begin(xml_node&) + { + return true; + } + + PUGI__FN bool xml_tree_walker::end(xml_node&) + { + return true; + } + + PUGI__FN xml_attribute::xml_attribute(): _attr(0) + { + } + + PUGI__FN 
xml_attribute::xml_attribute(xml_attribute_struct* attr): _attr(attr) + { + } + + PUGI__FN static void unspecified_bool_xml_attribute(xml_attribute***) + { + } + + PUGI__FN xml_attribute::operator xml_attribute::unspecified_bool_type() const + { + return _attr ? unspecified_bool_xml_attribute : 0; + } + + PUGI__FN bool xml_attribute::operator!() const + { + return !_attr; + } + + PUGI__FN bool xml_attribute::operator==(const xml_attribute& r) const + { + return (_attr == r._attr); + } + + PUGI__FN bool xml_attribute::operator!=(const xml_attribute& r) const + { + return (_attr != r._attr); + } + + PUGI__FN bool xml_attribute::operator<(const xml_attribute& r) const + { + return (_attr < r._attr); + } + + PUGI__FN bool xml_attribute::operator>(const xml_attribute& r) const + { + return (_attr > r._attr); + } + + PUGI__FN bool xml_attribute::operator<=(const xml_attribute& r) const + { + return (_attr <= r._attr); + } + + PUGI__FN bool xml_attribute::operator>=(const xml_attribute& r) const + { + return (_attr >= r._attr); + } + + PUGI__FN xml_attribute xml_attribute::next_attribute() const + { + return _attr ? xml_attribute(_attr->next_attribute) : xml_attribute(); + } + + PUGI__FN xml_attribute xml_attribute::previous_attribute() const + { + return _attr && _attr->prev_attribute_c->next_attribute ? xml_attribute(_attr->prev_attribute_c) : xml_attribute(); + } + + PUGI__FN const char_t* xml_attribute::as_string(const char_t* def) const + { + return (_attr && _attr->value) ? _attr->value + 0 : def; + } + + PUGI__FN int xml_attribute::as_int(int def) const + { + return (_attr && _attr->value) ? impl::get_value_int(_attr->value) : def; + } + + PUGI__FN unsigned int xml_attribute::as_uint(unsigned int def) const + { + return (_attr && _attr->value) ? impl::get_value_uint(_attr->value) : def; + } + + PUGI__FN double xml_attribute::as_double(double def) const + { + return (_attr && _attr->value) ? 
impl::get_value_double(_attr->value) : def; + } + + PUGI__FN float xml_attribute::as_float(float def) const + { + return (_attr && _attr->value) ? impl::get_value_float(_attr->value) : def; + } + + PUGI__FN bool xml_attribute::as_bool(bool def) const + { + return (_attr && _attr->value) ? impl::get_value_bool(_attr->value) : def; + } + +#ifdef PUGIXML_HAS_LONG_LONG + PUGI__FN long long xml_attribute::as_llong(long long def) const + { + return (_attr && _attr->value) ? impl::get_value_llong(_attr->value) : def; + } + + PUGI__FN unsigned long long xml_attribute::as_ullong(unsigned long long def) const + { + return (_attr && _attr->value) ? impl::get_value_ullong(_attr->value) : def; + } +#endif + + PUGI__FN bool xml_attribute::empty() const + { + return !_attr; + } + + PUGI__FN const char_t* xml_attribute::name() const + { + return (_attr && _attr->name) ? _attr->name + 0 : PUGIXML_TEXT(""); + } + + PUGI__FN const char_t* xml_attribute::value() const + { + return (_attr && _attr->value) ? 
_attr->value + 0 : PUGIXML_TEXT(""); + } + + PUGI__FN size_t xml_attribute::hash_value() const + { + return static_cast(reinterpret_cast(_attr) / sizeof(xml_attribute_struct)); + } + + PUGI__FN xml_attribute_struct* xml_attribute::internal_object() const + { + return _attr; + } + + PUGI__FN xml_attribute& xml_attribute::operator=(const char_t* rhs) + { + set_value(rhs); + return *this; + } + + PUGI__FN xml_attribute& xml_attribute::operator=(int rhs) + { + set_value(rhs); + return *this; + } + + PUGI__FN xml_attribute& xml_attribute::operator=(unsigned int rhs) + { + set_value(rhs); + return *this; + } + + PUGI__FN xml_attribute& xml_attribute::operator=(long rhs) + { + set_value(rhs); + return *this; + } + + PUGI__FN xml_attribute& xml_attribute::operator=(unsigned long rhs) + { + set_value(rhs); + return *this; + } + + PUGI__FN xml_attribute& xml_attribute::operator=(double rhs) + { + set_value(rhs); + return *this; + } + + PUGI__FN xml_attribute& xml_attribute::operator=(float rhs) + { + set_value(rhs); + return *this; + } + + PUGI__FN xml_attribute& xml_attribute::operator=(bool rhs) + { + set_value(rhs); + return *this; + } + +#ifdef PUGIXML_HAS_LONG_LONG + PUGI__FN xml_attribute& xml_attribute::operator=(long long rhs) + { + set_value(rhs); + return *this; + } + + PUGI__FN xml_attribute& xml_attribute::operator=(unsigned long long rhs) + { + set_value(rhs); + return *this; + } +#endif + + PUGI__FN bool xml_attribute::set_name(const char_t* rhs) + { + if (!_attr) return false; + + return impl::strcpy_insitu(_attr->name, _attr->header, impl::xml_memory_page_name_allocated_mask, rhs, impl::strlength(rhs)); + } + + PUGI__FN bool xml_attribute::set_value(const char_t* rhs) + { + if (!_attr) return false; + + return impl::strcpy_insitu(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, impl::strlength(rhs)); + } + + PUGI__FN bool xml_attribute::set_value(int rhs) + { + if (!_attr) return false; + + return 
impl::set_value_integer(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, rhs < 0); + } + + PUGI__FN bool xml_attribute::set_value(unsigned int rhs) + { + if (!_attr) return false; + + return impl::set_value_integer(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, false); + } + + PUGI__FN bool xml_attribute::set_value(long rhs) + { + if (!_attr) return false; + + return impl::set_value_integer(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, rhs < 0); + } + + PUGI__FN bool xml_attribute::set_value(unsigned long rhs) + { + if (!_attr) return false; + + return impl::set_value_integer(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, false); + } + + PUGI__FN bool xml_attribute::set_value(double rhs) + { + if (!_attr) return false; + + return impl::set_value_convert(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, default_double_precision); + } + + PUGI__FN bool xml_attribute::set_value(double rhs, int precision) + { + if (!_attr) return false; + + return impl::set_value_convert(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, precision); + } + + PUGI__FN bool xml_attribute::set_value(float rhs) + { + if (!_attr) return false; + + return impl::set_value_convert(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, default_float_precision); + } + + PUGI__FN bool xml_attribute::set_value(float rhs, int precision) + { + if (!_attr) return false; + + return impl::set_value_convert(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, precision); + } + + PUGI__FN bool xml_attribute::set_value(bool rhs) + { + if (!_attr) return false; + + return impl::set_value_bool(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs); + } + +#ifdef PUGIXML_HAS_LONG_LONG + PUGI__FN bool xml_attribute::set_value(long long rhs) + { + if (!_attr) return 
false; + + return impl::set_value_integer(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, rhs < 0); + } + + PUGI__FN bool xml_attribute::set_value(unsigned long long rhs) + { + if (!_attr) return false; + + return impl::set_value_integer(_attr->value, _attr->header, impl::xml_memory_page_value_allocated_mask, rhs, false); + } +#endif + +#ifdef __BORLANDC__ + PUGI__FN bool operator&&(const xml_attribute& lhs, bool rhs) + { + return (bool)lhs && rhs; + } + + PUGI__FN bool operator||(const xml_attribute& lhs, bool rhs) + { + return (bool)lhs || rhs; + } +#endif + + PUGI__FN xml_node::xml_node(): _root(0) + { + } + + PUGI__FN xml_node::xml_node(xml_node_struct* p): _root(p) + { + } + + PUGI__FN static void unspecified_bool_xml_node(xml_node***) + { + } + + PUGI__FN xml_node::operator xml_node::unspecified_bool_type() const + { + return _root ? unspecified_bool_xml_node : 0; + } + + PUGI__FN bool xml_node::operator!() const + { + return !_root; + } + + PUGI__FN xml_node::iterator xml_node::begin() const + { + return iterator(_root ? _root->first_child + 0 : 0, _root); + } + + PUGI__FN xml_node::iterator xml_node::end() const + { + return iterator(0, _root); + } + + PUGI__FN xml_node::attribute_iterator xml_node::attributes_begin() const + { + return attribute_iterator(_root ? 
_root->first_attribute + 0 : 0, _root); + } + + PUGI__FN xml_node::attribute_iterator xml_node::attributes_end() const + { + return attribute_iterator(0, _root); + } + + PUGI__FN xml_object_range xml_node::children() const + { + return xml_object_range(begin(), end()); + } + + PUGI__FN xml_object_range xml_node::children(const char_t* name_) const + { + return xml_object_range(xml_named_node_iterator(child(name_)._root, _root, name_), xml_named_node_iterator(0, _root, name_)); + } + + PUGI__FN xml_object_range xml_node::attributes() const + { + return xml_object_range(attributes_begin(), attributes_end()); + } + + PUGI__FN bool xml_node::operator==(const xml_node& r) const + { + return (_root == r._root); + } + + PUGI__FN bool xml_node::operator!=(const xml_node& r) const + { + return (_root != r._root); + } + + PUGI__FN bool xml_node::operator<(const xml_node& r) const + { + return (_root < r._root); + } + + PUGI__FN bool xml_node::operator>(const xml_node& r) const + { + return (_root > r._root); + } + + PUGI__FN bool xml_node::operator<=(const xml_node& r) const + { + return (_root <= r._root); + } + + PUGI__FN bool xml_node::operator>=(const xml_node& r) const + { + return (_root >= r._root); + } + + PUGI__FN bool xml_node::empty() const + { + return !_root; + } + + PUGI__FN const char_t* xml_node::name() const + { + return (_root && _root->name) ? _root->name + 0 : PUGIXML_TEXT(""); + } + + PUGI__FN xml_node_type xml_node::type() const + { + return _root ? PUGI__NODETYPE(_root) : node_null; + } + + PUGI__FN const char_t* xml_node::value() const + { + return (_root && _root->value) ? 
_root->value + 0 : PUGIXML_TEXT(""); + } + + PUGI__FN xml_node xml_node::child(const char_t* name_) const + { + if (!_root) return xml_node(); + + for (xml_node_struct* i = _root->first_child; i; i = i->next_sibling) + if (i->name && impl::strequal(name_, i->name)) return xml_node(i); + + return xml_node(); + } + + PUGI__FN xml_attribute xml_node::attribute(const char_t* name_) const + { + if (!_root) return xml_attribute(); + + for (xml_attribute_struct* i = _root->first_attribute; i; i = i->next_attribute) + if (i->name && impl::strequal(name_, i->name)) + return xml_attribute(i); + + return xml_attribute(); + } + + PUGI__FN xml_node xml_node::next_sibling(const char_t* name_) const + { + if (!_root) return xml_node(); + + for (xml_node_struct* i = _root->next_sibling; i; i = i->next_sibling) + if (i->name && impl::strequal(name_, i->name)) return xml_node(i); + + return xml_node(); + } + + PUGI__FN xml_node xml_node::next_sibling() const + { + return _root ? xml_node(_root->next_sibling) : xml_node(); + } + + PUGI__FN xml_node xml_node::previous_sibling(const char_t* name_) const + { + if (!_root) return xml_node(); + + for (xml_node_struct* i = _root->prev_sibling_c; i->next_sibling; i = i->prev_sibling_c) + if (i->name && impl::strequal(name_, i->name)) return xml_node(i); + + return xml_node(); + } + + PUGI__FN xml_attribute xml_node::attribute(const char_t* name_, xml_attribute& hint_) const + { + xml_attribute_struct* hint = hint_._attr; + + // if hint is not an attribute of node, behavior is not defined + assert(!hint || (_root && impl::is_attribute_of(hint, _root))); + + if (!_root) return xml_attribute(); + + // optimistically search from hint up until the end + for (xml_attribute_struct* i = hint; i; i = i->next_attribute) + if (i->name && impl::strequal(name_, i->name)) + { + // update hint to maximize efficiency of searching for consecutive attributes + hint_._attr = i->next_attribute; + + return xml_attribute(i); + } + + // wrap around and search 
from the first attribute until the hint + // 'j' null pointer check is technically redundant, but it prevents a crash in case the assertion above fails + for (xml_attribute_struct* j = _root->first_attribute; j && j != hint; j = j->next_attribute) + if (j->name && impl::strequal(name_, j->name)) + { + // update hint to maximize efficiency of searching for consecutive attributes + hint_._attr = j->next_attribute; + + return xml_attribute(j); + } + + return xml_attribute(); + } + + PUGI__FN xml_node xml_node::previous_sibling() const + { + if (!_root) return xml_node(); + + if (_root->prev_sibling_c->next_sibling) return xml_node(_root->prev_sibling_c); + else return xml_node(); + } + + PUGI__FN xml_node xml_node::parent() const + { + return _root ? xml_node(_root->parent) : xml_node(); + } + + PUGI__FN xml_node xml_node::root() const + { + return _root ? xml_node(&impl::get_document(_root)) : xml_node(); + } + + PUGI__FN xml_text xml_node::text() const + { + return xml_text(_root); + } + + PUGI__FN const char_t* xml_node::child_value() const + { + if (!_root) return PUGIXML_TEXT(""); + + // element nodes can have value if parse_embed_pcdata was used + if (PUGI__NODETYPE(_root) == node_element && _root->value) + return _root->value; + + for (xml_node_struct* i = _root->first_child; i; i = i->next_sibling) + if (impl::is_text_node(i) && i->value) + return i->value; + + return PUGIXML_TEXT(""); + } + + PUGI__FN const char_t* xml_node::child_value(const char_t* name_) const + { + return child(name_).child_value(); + } + + PUGI__FN xml_attribute xml_node::first_attribute() const + { + return _root ? xml_attribute(_root->first_attribute) : xml_attribute(); + } + + PUGI__FN xml_attribute xml_node::last_attribute() const + { + return _root && _root->first_attribute ? xml_attribute(_root->first_attribute->prev_attribute_c) : xml_attribute(); + } + + PUGI__FN xml_node xml_node::first_child() const + { + return _root ? 
xml_node(_root->first_child) : xml_node(); + } + + PUGI__FN xml_node xml_node::last_child() const + { + return _root && _root->first_child ? xml_node(_root->first_child->prev_sibling_c) : xml_node(); + } + + PUGI__FN bool xml_node::set_name(const char_t* rhs) + { + xml_node_type type_ = _root ? PUGI__NODETYPE(_root) : node_null; + + if (type_ != node_element && type_ != node_pi && type_ != node_declaration) + return false; + + return impl::strcpy_insitu(_root->name, _root->header, impl::xml_memory_page_name_allocated_mask, rhs, impl::strlength(rhs)); + } + + PUGI__FN bool xml_node::set_value(const char_t* rhs) + { + xml_node_type type_ = _root ? PUGI__NODETYPE(_root) : node_null; + + if (type_ != node_pcdata && type_ != node_cdata && type_ != node_comment && type_ != node_pi && type_ != node_doctype) + return false; + + return impl::strcpy_insitu(_root->value, _root->header, impl::xml_memory_page_value_allocated_mask, rhs, impl::strlength(rhs)); + } + + PUGI__FN xml_attribute xml_node::append_attribute(const char_t* name_) + { + if (!impl::allow_insert_attribute(type())) return xml_attribute(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_attribute(); + + xml_attribute a(impl::allocate_attribute(alloc)); + if (!a) return xml_attribute(); + + impl::append_attribute(a._attr, _root); + + a.set_name(name_); + + return a; + } + + PUGI__FN xml_attribute xml_node::prepend_attribute(const char_t* name_) + { + if (!impl::allow_insert_attribute(type())) return xml_attribute(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_attribute(); + + xml_attribute a(impl::allocate_attribute(alloc)); + if (!a) return xml_attribute(); + + impl::prepend_attribute(a._attr, _root); + + a.set_name(name_); + + return a; + } + + PUGI__FN xml_attribute xml_node::insert_attribute_after(const char_t* name_, const xml_attribute& attr) + { + if (!impl::allow_insert_attribute(type())) return 
xml_attribute(); + if (!attr || !impl::is_attribute_of(attr._attr, _root)) return xml_attribute(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_attribute(); + + xml_attribute a(impl::allocate_attribute(alloc)); + if (!a) return xml_attribute(); + + impl::insert_attribute_after(a._attr, attr._attr, _root); + + a.set_name(name_); + + return a; + } + + PUGI__FN xml_attribute xml_node::insert_attribute_before(const char_t* name_, const xml_attribute& attr) + { + if (!impl::allow_insert_attribute(type())) return xml_attribute(); + if (!attr || !impl::is_attribute_of(attr._attr, _root)) return xml_attribute(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_attribute(); + + xml_attribute a(impl::allocate_attribute(alloc)); + if (!a) return xml_attribute(); + + impl::insert_attribute_before(a._attr, attr._attr, _root); + + a.set_name(name_); + + return a; + } + + PUGI__FN xml_attribute xml_node::append_copy(const xml_attribute& proto) + { + if (!proto) return xml_attribute(); + if (!impl::allow_insert_attribute(type())) return xml_attribute(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_attribute(); + + xml_attribute a(impl::allocate_attribute(alloc)); + if (!a) return xml_attribute(); + + impl::append_attribute(a._attr, _root); + impl::node_copy_attribute(a._attr, proto._attr); + + return a; + } + + PUGI__FN xml_attribute xml_node::prepend_copy(const xml_attribute& proto) + { + if (!proto) return xml_attribute(); + if (!impl::allow_insert_attribute(type())) return xml_attribute(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_attribute(); + + xml_attribute a(impl::allocate_attribute(alloc)); + if (!a) return xml_attribute(); + + impl::prepend_attribute(a._attr, _root); + impl::node_copy_attribute(a._attr, proto._attr); + + return a; + } + + PUGI__FN xml_attribute 
xml_node::insert_copy_after(const xml_attribute& proto, const xml_attribute& attr) + { + if (!proto) return xml_attribute(); + if (!impl::allow_insert_attribute(type())) return xml_attribute(); + if (!attr || !impl::is_attribute_of(attr._attr, _root)) return xml_attribute(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_attribute(); + + xml_attribute a(impl::allocate_attribute(alloc)); + if (!a) return xml_attribute(); + + impl::insert_attribute_after(a._attr, attr._attr, _root); + impl::node_copy_attribute(a._attr, proto._attr); + + return a; + } + + PUGI__FN xml_attribute xml_node::insert_copy_before(const xml_attribute& proto, const xml_attribute& attr) + { + if (!proto) return xml_attribute(); + if (!impl::allow_insert_attribute(type())) return xml_attribute(); + if (!attr || !impl::is_attribute_of(attr._attr, _root)) return xml_attribute(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_attribute(); + + xml_attribute a(impl::allocate_attribute(alloc)); + if (!a) return xml_attribute(); + + impl::insert_attribute_before(a._attr, attr._attr, _root); + impl::node_copy_attribute(a._attr, proto._attr); + + return a; + } + + PUGI__FN xml_node xml_node::append_child(xml_node_type type_) + { + if (!impl::allow_insert_child(type(), type_)) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + xml_node n(impl::allocate_node(alloc, type_)); + if (!n) return xml_node(); + + impl::append_node(n._root, _root); + + if (type_ == node_declaration) n.set_name(PUGIXML_TEXT("xml")); + + return n; + } + + PUGI__FN xml_node xml_node::prepend_child(xml_node_type type_) + { + if (!impl::allow_insert_child(type(), type_)) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + xml_node n(impl::allocate_node(alloc, type_)); + if (!n) return 
xml_node(); + + impl::prepend_node(n._root, _root); + + if (type_ == node_declaration) n.set_name(PUGIXML_TEXT("xml")); + + return n; + } + + PUGI__FN xml_node xml_node::insert_child_before(xml_node_type type_, const xml_node& node) + { + if (!impl::allow_insert_child(type(), type_)) return xml_node(); + if (!node._root || node._root->parent != _root) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + xml_node n(impl::allocate_node(alloc, type_)); + if (!n) return xml_node(); + + impl::insert_node_before(n._root, node._root); + + if (type_ == node_declaration) n.set_name(PUGIXML_TEXT("xml")); + + return n; + } + + PUGI__FN xml_node xml_node::insert_child_after(xml_node_type type_, const xml_node& node) + { + if (!impl::allow_insert_child(type(), type_)) return xml_node(); + if (!node._root || node._root->parent != _root) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + xml_node n(impl::allocate_node(alloc, type_)); + if (!n) return xml_node(); + + impl::insert_node_after(n._root, node._root); + + if (type_ == node_declaration) n.set_name(PUGIXML_TEXT("xml")); + + return n; + } + + PUGI__FN xml_node xml_node::append_child(const char_t* name_) + { + xml_node result = append_child(node_element); + + result.set_name(name_); + + return result; + } + + PUGI__FN xml_node xml_node::prepend_child(const char_t* name_) + { + xml_node result = prepend_child(node_element); + + result.set_name(name_); + + return result; + } + + PUGI__FN xml_node xml_node::insert_child_after(const char_t* name_, const xml_node& node) + { + xml_node result = insert_child_after(node_element, node); + + result.set_name(name_); + + return result; + } + + PUGI__FN xml_node xml_node::insert_child_before(const char_t* name_, const xml_node& node) + { + xml_node result = insert_child_before(node_element, node); + + result.set_name(name_); + + return 
result; + } + + PUGI__FN xml_node xml_node::append_copy(const xml_node& proto) + { + xml_node_type type_ = proto.type(); + if (!impl::allow_insert_child(type(), type_)) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + xml_node n(impl::allocate_node(alloc, type_)); + if (!n) return xml_node(); + + impl::append_node(n._root, _root); + impl::node_copy_tree(n._root, proto._root); + + return n; + } + + PUGI__FN xml_node xml_node::prepend_copy(const xml_node& proto) + { + xml_node_type type_ = proto.type(); + if (!impl::allow_insert_child(type(), type_)) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + xml_node n(impl::allocate_node(alloc, type_)); + if (!n) return xml_node(); + + impl::prepend_node(n._root, _root); + impl::node_copy_tree(n._root, proto._root); + + return n; + } + + PUGI__FN xml_node xml_node::insert_copy_after(const xml_node& proto, const xml_node& node) + { + xml_node_type type_ = proto.type(); + if (!impl::allow_insert_child(type(), type_)) return xml_node(); + if (!node._root || node._root->parent != _root) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + xml_node n(impl::allocate_node(alloc, type_)); + if (!n) return xml_node(); + + impl::insert_node_after(n._root, node._root); + impl::node_copy_tree(n._root, proto._root); + + return n; + } + + PUGI__FN xml_node xml_node::insert_copy_before(const xml_node& proto, const xml_node& node) + { + xml_node_type type_ = proto.type(); + if (!impl::allow_insert_child(type(), type_)) return xml_node(); + if (!node._root || node._root->parent != _root) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + xml_node n(impl::allocate_node(alloc, type_)); + if (!n) return xml_node(); + + 
impl::insert_node_before(n._root, node._root); + impl::node_copy_tree(n._root, proto._root); + + return n; + } + + PUGI__FN xml_node xml_node::append_move(const xml_node& moved) + { + if (!impl::allow_move(*this, moved)) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + // disable document_buffer_order optimization since moving nodes around changes document order without changing buffer pointers + impl::get_document(_root).header |= impl::xml_memory_page_contents_shared_mask; + + impl::remove_node(moved._root); + impl::append_node(moved._root, _root); + + return moved; + } + + PUGI__FN xml_node xml_node::prepend_move(const xml_node& moved) + { + if (!impl::allow_move(*this, moved)) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + // disable document_buffer_order optimization since moving nodes around changes document order without changing buffer pointers + impl::get_document(_root).header |= impl::xml_memory_page_contents_shared_mask; + + impl::remove_node(moved._root); + impl::prepend_node(moved._root, _root); + + return moved; + } + + PUGI__FN xml_node xml_node::insert_move_after(const xml_node& moved, const xml_node& node) + { + if (!impl::allow_move(*this, moved)) return xml_node(); + if (!node._root || node._root->parent != _root) return xml_node(); + if (moved._root == node._root) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + // disable document_buffer_order optimization since moving nodes around changes document order without changing buffer pointers + impl::get_document(_root).header |= impl::xml_memory_page_contents_shared_mask; + + impl::remove_node(moved._root); + impl::insert_node_after(moved._root, node._root); + + return moved; + } + + PUGI__FN xml_node xml_node::insert_move_before(const xml_node& moved, const 
xml_node& node) + { + if (!impl::allow_move(*this, moved)) return xml_node(); + if (!node._root || node._root->parent != _root) return xml_node(); + if (moved._root == node._root) return xml_node(); + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return xml_node(); + + // disable document_buffer_order optimization since moving nodes around changes document order without changing buffer pointers + impl::get_document(_root).header |= impl::xml_memory_page_contents_shared_mask; + + impl::remove_node(moved._root); + impl::insert_node_before(moved._root, node._root); + + return moved; + } + + PUGI__FN bool xml_node::remove_attribute(const char_t* name_) + { + return remove_attribute(attribute(name_)); + } + + PUGI__FN bool xml_node::remove_attribute(const xml_attribute& a) + { + if (!_root || !a._attr) return false; + if (!impl::is_attribute_of(a._attr, _root)) return false; + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return false; + + impl::remove_attribute(a._attr, _root); + impl::destroy_attribute(a._attr, alloc); + + return true; + } + + PUGI__FN bool xml_node::remove_attributes() + { + if (!_root) return false; + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return false; + + for (xml_attribute_struct* attr = _root->first_attribute; attr; ) + { + xml_attribute_struct* next = attr->next_attribute; + + impl::destroy_attribute(attr, alloc); + + attr = next; + } + + _root->first_attribute = 0; + + return true; + } + + PUGI__FN bool xml_node::remove_child(const char_t* name_) + { + return remove_child(child(name_)); + } + + PUGI__FN bool xml_node::remove_child(const xml_node& n) + { + if (!_root || !n._root || n._root->parent != _root) return false; + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return false; + + impl::remove_node(n._root); + impl::destroy_node(n._root, alloc); + + return true; + } + + PUGI__FN bool 
xml_node::remove_children() + { + if (!_root) return false; + + impl::xml_allocator& alloc = impl::get_allocator(_root); + if (!alloc.reserve()) return false; + + for (xml_node_struct* cur = _root->first_child; cur; ) + { + xml_node_struct* next = cur->next_sibling; + + impl::destroy_node(cur, alloc); + + cur = next; + } + + _root->first_child = 0; + + return true; + } + + PUGI__FN xml_parse_result xml_node::append_buffer(const void* contents, size_t size, unsigned int options, xml_encoding encoding) + { + // append_buffer is only valid for elements/documents + if (!impl::allow_insert_child(type(), node_element)) return impl::make_parse_result(status_append_invalid_root); + + // get document node + impl::xml_document_struct* doc = &impl::get_document(_root); + + // disable document_buffer_order optimization since in a document with multiple buffers comparing buffer pointers does not make sense + doc->header |= impl::xml_memory_page_contents_shared_mask; + + // get extra buffer element (we'll store the document fragment buffer there so that we can deallocate it later) + impl::xml_memory_page* page = 0; + impl::xml_extra_buffer* extra = static_cast(doc->allocate_memory(sizeof(impl::xml_extra_buffer) + sizeof(void*), page)); + (void)page; + + if (!extra) return impl::make_parse_result(status_out_of_memory); + + #ifdef PUGIXML_COMPACT + // align the memory block to a pointer boundary; this is required for compact mode where memory allocations are only 4b aligned + // note that this requires up to sizeof(void*)-1 additional memory, which the allocation above takes into account + extra = reinterpret_cast((reinterpret_cast(extra) + (sizeof(void*) - 1)) & ~(sizeof(void*) - 1)); + #endif + + // add extra buffer to the list + extra->buffer = 0; + extra->next = doc->extra_buffers; + doc->extra_buffers = extra; + + // name of the root has to be NULL before parsing - otherwise closing node mismatches will not be detected at the top level + impl::name_null_sentry sentry(_root); 
+ + return impl::load_buffer_impl(doc, _root, const_cast(contents), size, options, encoding, false, false, &extra->buffer); + } + + PUGI__FN xml_node xml_node::find_child_by_attribute(const char_t* name_, const char_t* attr_name, const char_t* attr_value) const + { + if (!_root) return xml_node(); + + for (xml_node_struct* i = _root->first_child; i; i = i->next_sibling) + if (i->name && impl::strequal(name_, i->name)) + { + for (xml_attribute_struct* a = i->first_attribute; a; a = a->next_attribute) + if (a->name && impl::strequal(attr_name, a->name) && impl::strequal(attr_value, a->value ? a->value + 0 : PUGIXML_TEXT(""))) + return xml_node(i); + } + + return xml_node(); + } + + PUGI__FN xml_node xml_node::find_child_by_attribute(const char_t* attr_name, const char_t* attr_value) const + { + if (!_root) return xml_node(); + + for (xml_node_struct* i = _root->first_child; i; i = i->next_sibling) + for (xml_attribute_struct* a = i->first_attribute; a; a = a->next_attribute) + if (a->name && impl::strequal(attr_name, a->name) && impl::strequal(attr_value, a->value ? a->value + 0 : PUGIXML_TEXT(""))) + return xml_node(i); + + return xml_node(); + } + +#ifndef PUGIXML_NO_STL + PUGI__FN string_t xml_node::path(char_t delimiter) const + { + if (!_root) return string_t(); + + size_t offset = 0; + + for (xml_node_struct* i = _root; i; i = i->parent) + { + offset += (i != _root); + offset += i->name ? impl::strlength(i->name) : 0; + } + + string_t result; + result.resize(offset); + + for (xml_node_struct* j = _root; j; j = j->parent) + { + if (j != _root) + result[--offset] = delimiter; + + if (j->name) + { + size_t length = impl::strlength(j->name); + + offset -= length; + memcpy(&result[offset], j->name, length * sizeof(char_t)); + } + } + + assert(offset == 0); + + return result; + } +#endif + + PUGI__FN xml_node xml_node::first_element_by_path(const char_t* path_, char_t delimiter) const + { + xml_node context = path_[0] == delimiter ? 
root() : *this; + + if (!context._root) return xml_node(); + + const char_t* path_segment = path_; + + while (*path_segment == delimiter) ++path_segment; + + const char_t* path_segment_end = path_segment; + + while (*path_segment_end && *path_segment_end != delimiter) ++path_segment_end; + + if (path_segment == path_segment_end) return context; + + const char_t* next_segment = path_segment_end; + + while (*next_segment == delimiter) ++next_segment; + + if (*path_segment == '.' && path_segment + 1 == path_segment_end) + return context.first_element_by_path(next_segment, delimiter); + else if (*path_segment == '.' && *(path_segment+1) == '.' && path_segment + 2 == path_segment_end) + return context.parent().first_element_by_path(next_segment, delimiter); + else + { + for (xml_node_struct* j = context._root->first_child; j; j = j->next_sibling) + { + if (j->name && impl::strequalrange(j->name, path_segment, static_cast(path_segment_end - path_segment))) + { + xml_node subsearch = xml_node(j).first_element_by_path(next_segment, delimiter); + + if (subsearch) return subsearch; + } + } + + return xml_node(); + } + } + + PUGI__FN bool xml_node::traverse(xml_tree_walker& walker) + { + walker._depth = -1; + + xml_node arg_begin(_root); + if (!walker.begin(arg_begin)) return false; + + xml_node_struct* cur = _root ? 
_root->first_child + 0 : 0; + + if (cur) + { + ++walker._depth; + + do + { + xml_node arg_for_each(cur); + if (!walker.for_each(arg_for_each)) + return false; + + if (cur->first_child) + { + ++walker._depth; + cur = cur->first_child; + } + else if (cur->next_sibling) + cur = cur->next_sibling; + else + { + while (!cur->next_sibling && cur != _root && cur->parent) + { + --walker._depth; + cur = cur->parent; + } + + if (cur != _root) + cur = cur->next_sibling; + } + } + while (cur && cur != _root); + } + + assert(walker._depth == -1); + + xml_node arg_end(_root); + return walker.end(arg_end); + } + + PUGI__FN size_t xml_node::hash_value() const + { + return static_cast(reinterpret_cast(_root) / sizeof(xml_node_struct)); + } + + PUGI__FN xml_node_struct* xml_node::internal_object() const + { + return _root; + } + + PUGI__FN void xml_node::print(xml_writer& writer, const char_t* indent, unsigned int flags, xml_encoding encoding, unsigned int depth) const + { + if (!_root) return; + + impl::xml_buffered_writer buffered_writer(writer, encoding); + + impl::node_output(buffered_writer, _root, indent, flags, depth); + + buffered_writer.flush(); + } + +#ifndef PUGIXML_NO_STL + PUGI__FN void xml_node::print(std::basic_ostream >& stream, const char_t* indent, unsigned int flags, xml_encoding encoding, unsigned int depth) const + { + xml_writer_stream writer(stream); + + print(writer, indent, flags, encoding, depth); + } + + PUGI__FN void xml_node::print(std::basic_ostream >& stream, const char_t* indent, unsigned int flags, unsigned int depth) const + { + xml_writer_stream writer(stream); + + print(writer, indent, flags, encoding_wchar, depth); + } +#endif + + PUGI__FN ptrdiff_t xml_node::offset_debug() const + { + if (!_root) return -1; + + impl::xml_document_struct& doc = impl::get_document(_root); + + // we can determine the offset reliably only if there is exactly once parse buffer + if (!doc.buffer || doc.extra_buffers) return -1; + + switch (type()) + { + case 
node_document: + return 0; + + case node_element: + case node_declaration: + case node_pi: + return _root->name && (_root->header & impl::xml_memory_page_name_allocated_or_shared_mask) == 0 ? _root->name - doc.buffer : -1; + + case node_pcdata: + case node_cdata: + case node_comment: + case node_doctype: + return _root->value && (_root->header & impl::xml_memory_page_value_allocated_or_shared_mask) == 0 ? _root->value - doc.buffer : -1; + + default: + assert(false && "Invalid node type"); // unreachable + return -1; + } + } + +#ifdef __BORLANDC__ + PUGI__FN bool operator&&(const xml_node& lhs, bool rhs) + { + return (bool)lhs && rhs; + } + + PUGI__FN bool operator||(const xml_node& lhs, bool rhs) + { + return (bool)lhs || rhs; + } +#endif + + PUGI__FN xml_text::xml_text(xml_node_struct* root): _root(root) + { + } + + PUGI__FN xml_node_struct* xml_text::_data() const + { + if (!_root || impl::is_text_node(_root)) return _root; + + // element nodes can have value if parse_embed_pcdata was used + if (PUGI__NODETYPE(_root) == node_element && _root->value) + return _root; + + for (xml_node_struct* node = _root->first_child; node; node = node->next_sibling) + if (impl::is_text_node(node)) + return node; + + return 0; + } + + PUGI__FN xml_node_struct* xml_text::_data_new() + { + xml_node_struct* d = _data(); + if (d) return d; + + return xml_node(_root).append_child(node_pcdata).internal_object(); + } + + PUGI__FN xml_text::xml_text(): _root(0) + { + } + + PUGI__FN static void unspecified_bool_xml_text(xml_text***) + { + } + + PUGI__FN xml_text::operator xml_text::unspecified_bool_type() const + { + return _data() ? unspecified_bool_xml_text : 0; + } + + PUGI__FN bool xml_text::operator!() const + { + return !_data(); + } + + PUGI__FN bool xml_text::empty() const + { + return _data() == 0; + } + + PUGI__FN const char_t* xml_text::get() const + { + xml_node_struct* d = _data(); + + return (d && d->value) ? 
d->value + 0 : PUGIXML_TEXT(""); + } + + PUGI__FN const char_t* xml_text::as_string(const char_t* def) const + { + xml_node_struct* d = _data(); + + return (d && d->value) ? d->value + 0 : def; + } + + PUGI__FN int xml_text::as_int(int def) const + { + xml_node_struct* d = _data(); + + return (d && d->value) ? impl::get_value_int(d->value) : def; + } + + PUGI__FN unsigned int xml_text::as_uint(unsigned int def) const + { + xml_node_struct* d = _data(); + + return (d && d->value) ? impl::get_value_uint(d->value) : def; + } + + PUGI__FN double xml_text::as_double(double def) const + { + xml_node_struct* d = _data(); + + return (d && d->value) ? impl::get_value_double(d->value) : def; + } + + PUGI__FN float xml_text::as_float(float def) const + { + xml_node_struct* d = _data(); + + return (d && d->value) ? impl::get_value_float(d->value) : def; + } + + PUGI__FN bool xml_text::as_bool(bool def) const + { + xml_node_struct* d = _data(); + + return (d && d->value) ? impl::get_value_bool(d->value) : def; + } + +#ifdef PUGIXML_HAS_LONG_LONG + PUGI__FN long long xml_text::as_llong(long long def) const + { + xml_node_struct* d = _data(); + + return (d && d->value) ? impl::get_value_llong(d->value) : def; + } + + PUGI__FN unsigned long long xml_text::as_ullong(unsigned long long def) const + { + xml_node_struct* d = _data(); + + return (d && d->value) ? impl::get_value_ullong(d->value) : def; + } +#endif + + PUGI__FN bool xml_text::set(const char_t* rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::strcpy_insitu(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, impl::strlength(rhs)) : false; + } + + PUGI__FN bool xml_text::set(int rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::set_value_integer(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, rhs < 0) : false; + } + + PUGI__FN bool xml_text::set(unsigned int rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? 
impl::set_value_integer(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, false) : false; + } + + PUGI__FN bool xml_text::set(long rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::set_value_integer(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, rhs < 0) : false; + } + + PUGI__FN bool xml_text::set(unsigned long rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::set_value_integer(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, false) : false; + } + + PUGI__FN bool xml_text::set(float rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::set_value_convert(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, default_float_precision) : false; + } + + PUGI__FN bool xml_text::set(float rhs, int precision) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::set_value_convert(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, precision) : false; + } + + PUGI__FN bool xml_text::set(double rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::set_value_convert(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, default_double_precision) : false; + } + + PUGI__FN bool xml_text::set(double rhs, int precision) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::set_value_convert(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, precision) : false; + } + + PUGI__FN bool xml_text::set(bool rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::set_value_bool(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs) : false; + } + +#ifdef PUGIXML_HAS_LONG_LONG + PUGI__FN bool xml_text::set(long long rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? 
impl::set_value_integer(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, rhs < 0) : false; + } + + PUGI__FN bool xml_text::set(unsigned long long rhs) + { + xml_node_struct* dn = _data_new(); + + return dn ? impl::set_value_integer(dn->value, dn->header, impl::xml_memory_page_value_allocated_mask, rhs, false) : false; + } +#endif + + PUGI__FN xml_text& xml_text::operator=(const char_t* rhs) + { + set(rhs); + return *this; + } + + PUGI__FN xml_text& xml_text::operator=(int rhs) + { + set(rhs); + return *this; + } + + PUGI__FN xml_text& xml_text::operator=(unsigned int rhs) + { + set(rhs); + return *this; + } + + PUGI__FN xml_text& xml_text::operator=(long rhs) + { + set(rhs); + return *this; + } + + PUGI__FN xml_text& xml_text::operator=(unsigned long rhs) + { + set(rhs); + return *this; + } + + PUGI__FN xml_text& xml_text::operator=(double rhs) + { + set(rhs); + return *this; + } + + PUGI__FN xml_text& xml_text::operator=(float rhs) + { + set(rhs); + return *this; + } + + PUGI__FN xml_text& xml_text::operator=(bool rhs) + { + set(rhs); + return *this; + } + +#ifdef PUGIXML_HAS_LONG_LONG + PUGI__FN xml_text& xml_text::operator=(long long rhs) + { + set(rhs); + return *this; + } + + PUGI__FN xml_text& xml_text::operator=(unsigned long long rhs) + { + set(rhs); + return *this; + } +#endif + + PUGI__FN xml_node xml_text::data() const + { + return xml_node(_data()); + } + +#ifdef __BORLANDC__ + PUGI__FN bool operator&&(const xml_text& lhs, bool rhs) + { + return (bool)lhs && rhs; + } + + PUGI__FN bool operator||(const xml_text& lhs, bool rhs) + { + return (bool)lhs || rhs; + } +#endif + + PUGI__FN xml_node_iterator::xml_node_iterator() + { + } + + PUGI__FN xml_node_iterator::xml_node_iterator(const xml_node& node): _wrap(node), _parent(node.parent()) + { + } + + PUGI__FN xml_node_iterator::xml_node_iterator(xml_node_struct* ref, xml_node_struct* parent): _wrap(ref), _parent(parent) + { + } + + PUGI__FN bool xml_node_iterator::operator==(const 
xml_node_iterator& rhs) const + { + return _wrap._root == rhs._wrap._root && _parent._root == rhs._parent._root; + } + + PUGI__FN bool xml_node_iterator::operator!=(const xml_node_iterator& rhs) const + { + return _wrap._root != rhs._wrap._root || _parent._root != rhs._parent._root; + } + + PUGI__FN xml_node& xml_node_iterator::operator*() const + { + assert(_wrap._root); + return _wrap; + } + + PUGI__FN xml_node* xml_node_iterator::operator->() const + { + assert(_wrap._root); + return const_cast(&_wrap); // BCC5 workaround + } + + PUGI__FN xml_node_iterator& xml_node_iterator::operator++() + { + assert(_wrap._root); + _wrap._root = _wrap._root->next_sibling; + return *this; + } + + PUGI__FN xml_node_iterator xml_node_iterator::operator++(int) + { + xml_node_iterator temp = *this; + ++*this; + return temp; + } + + PUGI__FN xml_node_iterator& xml_node_iterator::operator--() + { + _wrap = _wrap._root ? _wrap.previous_sibling() : _parent.last_child(); + return *this; + } + + PUGI__FN xml_node_iterator xml_node_iterator::operator--(int) + { + xml_node_iterator temp = *this; + --*this; + return temp; + } + + PUGI__FN xml_attribute_iterator::xml_attribute_iterator() + { + } + + PUGI__FN xml_attribute_iterator::xml_attribute_iterator(const xml_attribute& attr, const xml_node& parent): _wrap(attr), _parent(parent) + { + } + + PUGI__FN xml_attribute_iterator::xml_attribute_iterator(xml_attribute_struct* ref, xml_node_struct* parent): _wrap(ref), _parent(parent) + { + } + + PUGI__FN bool xml_attribute_iterator::operator==(const xml_attribute_iterator& rhs) const + { + return _wrap._attr == rhs._wrap._attr && _parent._root == rhs._parent._root; + } + + PUGI__FN bool xml_attribute_iterator::operator!=(const xml_attribute_iterator& rhs) const + { + return _wrap._attr != rhs._wrap._attr || _parent._root != rhs._parent._root; + } + + PUGI__FN xml_attribute& xml_attribute_iterator::operator*() const + { + assert(_wrap._attr); + return _wrap; + } + + PUGI__FN xml_attribute* 
xml_attribute_iterator::operator->() const + { + assert(_wrap._attr); + return const_cast(&_wrap); // BCC5 workaround + } + + PUGI__FN xml_attribute_iterator& xml_attribute_iterator::operator++() + { + assert(_wrap._attr); + _wrap._attr = _wrap._attr->next_attribute; + return *this; + } + + PUGI__FN xml_attribute_iterator xml_attribute_iterator::operator++(int) + { + xml_attribute_iterator temp = *this; + ++*this; + return temp; + } + + PUGI__FN xml_attribute_iterator& xml_attribute_iterator::operator--() + { + _wrap = _wrap._attr ? _wrap.previous_attribute() : _parent.last_attribute(); + return *this; + } + + PUGI__FN xml_attribute_iterator xml_attribute_iterator::operator--(int) + { + xml_attribute_iterator temp = *this; + --*this; + return temp; + } + + PUGI__FN xml_named_node_iterator::xml_named_node_iterator(): _name(0) + { + } + + PUGI__FN xml_named_node_iterator::xml_named_node_iterator(const xml_node& node, const char_t* name): _wrap(node), _parent(node.parent()), _name(name) + { + } + + PUGI__FN xml_named_node_iterator::xml_named_node_iterator(xml_node_struct* ref, xml_node_struct* parent, const char_t* name): _wrap(ref), _parent(parent), _name(name) + { + } + + PUGI__FN bool xml_named_node_iterator::operator==(const xml_named_node_iterator& rhs) const + { + return _wrap._root == rhs._wrap._root && _parent._root == rhs._parent._root; + } + + PUGI__FN bool xml_named_node_iterator::operator!=(const xml_named_node_iterator& rhs) const + { + return _wrap._root != rhs._wrap._root || _parent._root != rhs._parent._root; + } + + PUGI__FN xml_node& xml_named_node_iterator::operator*() const + { + assert(_wrap._root); + return _wrap; + } + + PUGI__FN xml_node* xml_named_node_iterator::operator->() const + { + assert(_wrap._root); + return const_cast(&_wrap); // BCC5 workaround + } + + PUGI__FN xml_named_node_iterator& xml_named_node_iterator::operator++() + { + assert(_wrap._root); + _wrap = _wrap.next_sibling(_name); + return *this; + } + + PUGI__FN 
xml_named_node_iterator xml_named_node_iterator::operator++(int) + { + xml_named_node_iterator temp = *this; + ++*this; + return temp; + } + + PUGI__FN xml_named_node_iterator& xml_named_node_iterator::operator--() + { + if (_wrap._root) + _wrap = _wrap.previous_sibling(_name); + else + { + _wrap = _parent.last_child(); + + if (!impl::strequal(_wrap.name(), _name)) + _wrap = _wrap.previous_sibling(_name); + } + + return *this; + } + + PUGI__FN xml_named_node_iterator xml_named_node_iterator::operator--(int) + { + xml_named_node_iterator temp = *this; + --*this; + return temp; + } + + PUGI__FN xml_parse_result::xml_parse_result(): status(status_internal_error), offset(0), encoding(encoding_auto) + { + } + + PUGI__FN xml_parse_result::operator bool() const + { + return status == status_ok; + } + + PUGI__FN const char* xml_parse_result::description() const + { + switch (status) + { + case status_ok: return "No error"; + + case status_file_not_found: return "File was not found"; + case status_io_error: return "Error reading from file/stream"; + case status_out_of_memory: return "Could not allocate memory"; + case status_internal_error: return "Internal error occurred"; + + case status_unrecognized_tag: return "Could not determine tag type"; + + case status_bad_pi: return "Error parsing document declaration/processing instruction"; + case status_bad_comment: return "Error parsing comment"; + case status_bad_cdata: return "Error parsing CDATA section"; + case status_bad_doctype: return "Error parsing document type declaration"; + case status_bad_pcdata: return "Error parsing PCDATA section"; + case status_bad_start_element: return "Error parsing start element tag"; + case status_bad_attribute: return "Error parsing element attribute"; + case status_bad_end_element: return "Error parsing end element tag"; + case status_end_element_mismatch: return "Start-end tags mismatch"; + + case status_append_invalid_root: return "Unable to append nodes: root is not an element or 
document"; + + case status_no_document_element: return "No document element found"; + + default: return "Unknown error"; + } + } + + PUGI__FN xml_document::xml_document(): _buffer(0) + { + _create(); + } + + PUGI__FN xml_document::~xml_document() + { + _destroy(); + } + +#ifdef PUGIXML_HAS_MOVE + PUGI__FN xml_document::xml_document(xml_document&& rhs) PUGIXML_NOEXCEPT_IF_NOT_COMPACT: _buffer(0) + { + _create(); + _move(rhs); + } + + PUGI__FN xml_document& xml_document::operator=(xml_document&& rhs) PUGIXML_NOEXCEPT_IF_NOT_COMPACT + { + if (this == &rhs) return *this; + + _destroy(); + _create(); + _move(rhs); + + return *this; + } +#endif + + PUGI__FN void xml_document::reset() + { + _destroy(); + _create(); + } + + PUGI__FN void xml_document::reset(const xml_document& proto) + { + reset(); + + impl::node_copy_tree(_root, proto._root); + } + + PUGI__FN void xml_document::_create() + { + assert(!_root); + + #ifdef PUGIXML_COMPACT + // space for page marker for the first page (uint32_t), rounded up to pointer size; assumes pointers are at least 32-bit + const size_t page_offset = sizeof(void*); + #else + const size_t page_offset = 0; + #endif + + // initialize sentinel page + PUGI__STATIC_ASSERT(sizeof(impl::xml_memory_page) + sizeof(impl::xml_document_struct) + page_offset <= sizeof(_memory)); + + // prepare page structure + impl::xml_memory_page* page = impl::xml_memory_page::construct(_memory); + assert(page); + + page->busy_size = impl::xml_memory_page_size; + + // setup first page marker + #ifdef PUGIXML_COMPACT + // round-trip through void* to avoid 'cast increases required alignment of target type' warning + page->compact_page_marker = reinterpret_cast(static_cast(reinterpret_cast(page) + sizeof(impl::xml_memory_page))); + *page->compact_page_marker = sizeof(impl::xml_memory_page); + #endif + + // allocate new root + _root = new (reinterpret_cast(page) + sizeof(impl::xml_memory_page) + page_offset) impl::xml_document_struct(page); + _root->prev_sibling_c = 
_root; + + // setup sentinel page + page->allocator = static_cast(_root); + + // setup hash table pointer in allocator + #ifdef PUGIXML_COMPACT + page->allocator->_hash = &static_cast(_root)->hash; + #endif + + // verify the document allocation + assert(reinterpret_cast(_root) + sizeof(impl::xml_document_struct) <= _memory + sizeof(_memory)); + } + + PUGI__FN void xml_document::_destroy() + { + assert(_root); + + // destroy static storage + if (_buffer) + { + impl::xml_memory::deallocate(_buffer); + _buffer = 0; + } + + // destroy extra buffers (note: no need to destroy linked list nodes, they're allocated using document allocator) + for (impl::xml_extra_buffer* extra = static_cast(_root)->extra_buffers; extra; extra = extra->next) + { + if (extra->buffer) impl::xml_memory::deallocate(extra->buffer); + } + + // destroy dynamic storage, leave sentinel page (it's in static memory) + impl::xml_memory_page* root_page = PUGI__GETPAGE(_root); + assert(root_page && !root_page->prev); + assert(reinterpret_cast(root_page) >= _memory && reinterpret_cast(root_page) < _memory + sizeof(_memory)); + + for (impl::xml_memory_page* page = root_page->next; page; ) + { + impl::xml_memory_page* next = page->next; + + impl::xml_allocator::deallocate_page(page); + + page = next; + } + + #ifdef PUGIXML_COMPACT + // destroy hash table + static_cast(_root)->hash.clear(); + #endif + + _root = 0; + } + +#ifdef PUGIXML_HAS_MOVE + PUGI__FN void xml_document::_move(xml_document& rhs) PUGIXML_NOEXCEPT_IF_NOT_COMPACT + { + impl::xml_document_struct* doc = static_cast(_root); + impl::xml_document_struct* other = static_cast(rhs._root); + + // save first child pointer for later; this needs hash access + xml_node_struct* other_first_child = other->first_child; + + #ifdef PUGIXML_COMPACT + // reserve space for the hash table up front; this is the only operation that can fail + // if it does, we have no choice but to throw (if we have exceptions) + if (other_first_child) + { + size_t other_children = 
0; + for (xml_node_struct* node = other_first_child; node; node = node->next_sibling) + other_children++; + + // in compact mode, each pointer assignment could result in a hash table request + // during move, we have to relocate document first_child and parents of all children + // normally there's just one child and its parent has a pointerless encoding but + // we assume the worst here + if (!other->_hash->reserve(other_children + 1)) + { + #ifdef PUGIXML_NO_EXCEPTIONS + return; + #else + throw std::bad_alloc(); + #endif + } + } + #endif + + // move allocation state + // note that other->_root may point to the embedded document page, in which case we should keep original (empty) state + if (other->_root != PUGI__GETPAGE(other)) + { + doc->_root = other->_root; + doc->_busy_size = other->_busy_size; + } + + // move buffer state + doc->buffer = other->buffer; + doc->extra_buffers = other->extra_buffers; + _buffer = rhs._buffer; + + #ifdef PUGIXML_COMPACT + // move compact hash; note that the hash table can have pointers to other but they will be "inactive", similarly to nodes removed with remove_child + doc->hash = other->hash; + doc->_hash = &doc->hash; + + // make sure we don't access other hash up until the end when we reinitialize other document + other->_hash = 0; + #endif + + // move page structure + impl::xml_memory_page* doc_page = PUGI__GETPAGE(doc); + assert(doc_page && !doc_page->prev && !doc_page->next); + + impl::xml_memory_page* other_page = PUGI__GETPAGE(other); + assert(other_page && !other_page->prev); + + // relink pages since root page is embedded into xml_document + if (impl::xml_memory_page* page = other_page->next) + { + assert(page->prev == other_page); + + page->prev = doc_page; + + doc_page->next = page; + other_page->next = 0; + } + + // make sure pages point to the correct document state + for (impl::xml_memory_page* page = doc_page->next; page; page = page->next) + { + assert(page->allocator == other); + + page->allocator = doc; + + 
#ifdef PUGIXML_COMPACT + // this automatically migrates most children between documents and prevents ->parent assignment from allocating + if (page->compact_shared_parent == other) + page->compact_shared_parent = doc; + #endif + } + + // move tree structure + assert(!doc->first_child); + + doc->first_child = other_first_child; + + for (xml_node_struct* node = other_first_child; node; node = node->next_sibling) + { + #ifdef PUGIXML_COMPACT + // most children will have migrated when we reassigned compact_shared_parent + assert(node->parent == other || node->parent == doc); + + node->parent = doc; + #else + assert(node->parent == other); + node->parent = doc; + #endif + } + + // reset other document + new (other) impl::xml_document_struct(PUGI__GETPAGE(other)); + rhs._buffer = 0; + } +#endif + +#ifndef PUGIXML_NO_STL + PUGI__FN xml_parse_result xml_document::load(std::basic_istream >& stream, unsigned int options, xml_encoding encoding) + { + reset(); + + return impl::load_stream_impl(static_cast(_root), stream, options, encoding, &_buffer); + } + + PUGI__FN xml_parse_result xml_document::load(std::basic_istream >& stream, unsigned int options) + { + reset(); + + return impl::load_stream_impl(static_cast(_root), stream, options, encoding_wchar, &_buffer); + } +#endif + + PUGI__FN xml_parse_result xml_document::load_string(const char_t* contents, unsigned int options) + { + // Force native encoding (skip autodetection) + #ifdef PUGIXML_WCHAR_MODE + xml_encoding encoding = encoding_wchar; + #else + xml_encoding encoding = encoding_utf8; + #endif + + return load_buffer(contents, impl::strlength(contents) * sizeof(char_t), options, encoding); + } + + PUGI__FN xml_parse_result xml_document::load(const char_t* contents, unsigned int options) + { + return load_string(contents, options); + } + + PUGI__FN xml_parse_result xml_document::load_file(const char* path_, unsigned int options, xml_encoding encoding) + { + reset(); + + using impl::auto_deleter; // MSVC7 workaround + 
auto_deleter file(impl::open_file(path_, "rb"), impl::close_file); + + return impl::load_file_impl(static_cast(_root), file.data, options, encoding, &_buffer); + } + + PUGI__FN xml_parse_result xml_document::load_file(const wchar_t* path_, unsigned int options, xml_encoding encoding) + { + reset(); + + using impl::auto_deleter; // MSVC7 workaround + auto_deleter file(impl::open_file_wide(path_, L"rb"), impl::close_file); + + return impl::load_file_impl(static_cast(_root), file.data, options, encoding, &_buffer); + } + + PUGI__FN xml_parse_result xml_document::load_buffer(const void* contents, size_t size, unsigned int options, xml_encoding encoding) + { + reset(); + + return impl::load_buffer_impl(static_cast(_root), _root, const_cast(contents), size, options, encoding, false, false, &_buffer); + } + + PUGI__FN xml_parse_result xml_document::load_buffer_inplace(void* contents, size_t size, unsigned int options, xml_encoding encoding) + { + reset(); + + return impl::load_buffer_impl(static_cast(_root), _root, contents, size, options, encoding, true, false, &_buffer); + } + + PUGI__FN xml_parse_result xml_document::load_buffer_inplace_own(void* contents, size_t size, unsigned int options, xml_encoding encoding) + { + reset(); + + return impl::load_buffer_impl(static_cast(_root), _root, contents, size, options, encoding, true, true, &_buffer); + } + + PUGI__FN void xml_document::save(xml_writer& writer, const char_t* indent, unsigned int flags, xml_encoding encoding) const + { + impl::xml_buffered_writer buffered_writer(writer, encoding); + + if ((flags & format_write_bom) && encoding != encoding_latin1) + { + // BOM always represents the codepoint U+FEFF, so just write it in native encoding + #ifdef PUGIXML_WCHAR_MODE + unsigned int bom = 0xfeff; + buffered_writer.write(static_cast(bom)); + #else + buffered_writer.write('\xef', '\xbb', '\xbf'); + #endif + } + + if (!(flags & format_no_declaration) && !impl::has_declaration(_root)) + { + 
buffered_writer.write_string(PUGIXML_TEXT("'); + if (!(flags & format_raw)) buffered_writer.write('\n'); + } + + impl::node_output(buffered_writer, _root, indent, flags, 0); + + buffered_writer.flush(); + } + +#ifndef PUGIXML_NO_STL + PUGI__FN void xml_document::save(std::basic_ostream >& stream, const char_t* indent, unsigned int flags, xml_encoding encoding) const + { + xml_writer_stream writer(stream); + + save(writer, indent, flags, encoding); + } + + PUGI__FN void xml_document::save(std::basic_ostream >& stream, const char_t* indent, unsigned int flags) const + { + xml_writer_stream writer(stream); + + save(writer, indent, flags, encoding_wchar); + } +#endif + + PUGI__FN bool xml_document::save_file(const char* path_, const char_t* indent, unsigned int flags, xml_encoding encoding) const + { + using impl::auto_deleter; // MSVC7 workaround + auto_deleter file(impl::open_file(path_, (flags & format_save_file_text) ? "w" : "wb"), impl::close_file); + + return impl::save_file_impl(*this, file.data, indent, flags, encoding); + } + + PUGI__FN bool xml_document::save_file(const wchar_t* path_, const char_t* indent, unsigned int flags, xml_encoding encoding) const + { + using impl::auto_deleter; // MSVC7 workaround + auto_deleter file(impl::open_file_wide(path_, (flags & format_save_file_text) ? 
L"w" : L"wb"), impl::close_file); + + return impl::save_file_impl(*this, file.data, indent, flags, encoding); + } + + PUGI__FN xml_node xml_document::document_element() const + { + assert(_root); + + for (xml_node_struct* i = _root->first_child; i; i = i->next_sibling) + if (PUGI__NODETYPE(i) == node_element) + return xml_node(i); + + return xml_node(); + } + +#ifndef PUGIXML_NO_STL + PUGI__FN std::string PUGIXML_FUNCTION as_utf8(const wchar_t* str) + { + assert(str); + + return impl::as_utf8_impl(str, impl::strlength_wide(str)); + } + + PUGI__FN std::string PUGIXML_FUNCTION as_utf8(const std::basic_string& str) + { + return impl::as_utf8_impl(str.c_str(), str.size()); + } + + PUGI__FN std::basic_string PUGIXML_FUNCTION as_wide(const char* str) + { + assert(str); + + return impl::as_wide_impl(str, strlen(str)); + } + + PUGI__FN std::basic_string PUGIXML_FUNCTION as_wide(const std::string& str) + { + return impl::as_wide_impl(str.c_str(), str.size()); + } +#endif + + PUGI__FN void PUGIXML_FUNCTION set_memory_management_functions(allocation_function allocate, deallocation_function deallocate) + { + impl::xml_memory::allocate = allocate; + impl::xml_memory::deallocate = deallocate; + } + + PUGI__FN allocation_function PUGIXML_FUNCTION get_memory_allocation_function() + { + return impl::xml_memory::allocate; + } + + PUGI__FN deallocation_function PUGIXML_FUNCTION get_memory_deallocation_function() + { + return impl::xml_memory::deallocate; + } +} + +#if !defined(PUGIXML_NO_STL) && (defined(_MSC_VER) || defined(__ICC)) +namespace std +{ + // Workarounds for (non-standard) iterator category detection for older versions (MSVC7/IC8 and earlier) + PUGI__FN std::bidirectional_iterator_tag _Iter_cat(const pugi::xml_node_iterator&) + { + return std::bidirectional_iterator_tag(); + } + + PUGI__FN std::bidirectional_iterator_tag _Iter_cat(const pugi::xml_attribute_iterator&) + { + return std::bidirectional_iterator_tag(); + } + + PUGI__FN std::bidirectional_iterator_tag 
_Iter_cat(const pugi::xml_named_node_iterator&) + { + return std::bidirectional_iterator_tag(); + } +} +#endif + +#if !defined(PUGIXML_NO_STL) && defined(__SUNPRO_CC) +namespace std +{ + // Workarounds for (non-standard) iterator category detection + PUGI__FN std::bidirectional_iterator_tag __iterator_category(const pugi::xml_node_iterator&) + { + return std::bidirectional_iterator_tag(); + } + + PUGI__FN std::bidirectional_iterator_tag __iterator_category(const pugi::xml_attribute_iterator&) + { + return std::bidirectional_iterator_tag(); + } + + PUGI__FN std::bidirectional_iterator_tag __iterator_category(const pugi::xml_named_node_iterator&) + { + return std::bidirectional_iterator_tag(); + } +} +#endif + +#ifndef PUGIXML_NO_XPATH +// STL replacements +PUGI__NS_BEGIN + struct equal_to + { + template bool operator()(const T& lhs, const T& rhs) const + { + return lhs == rhs; + } + }; + + struct not_equal_to + { + template bool operator()(const T& lhs, const T& rhs) const + { + return lhs != rhs; + } + }; + + struct less + { + template bool operator()(const T& lhs, const T& rhs) const + { + return lhs < rhs; + } + }; + + struct less_equal + { + template bool operator()(const T& lhs, const T& rhs) const + { + return lhs <= rhs; + } + }; + + template inline void swap(T& lhs, T& rhs) + { + T temp = lhs; + lhs = rhs; + rhs = temp; + } + + template PUGI__FN I min_element(I begin, I end, const Pred& pred) + { + I result = begin; + + for (I it = begin + 1; it != end; ++it) + if (pred(*it, *result)) + result = it; + + return result; + } + + template PUGI__FN void reverse(I begin, I end) + { + while (end - begin > 1) + swap(*begin++, *--end); + } + + template PUGI__FN I unique(I begin, I end) + { + // fast skip head + while (end - begin > 1 && *begin != *(begin + 1)) + begin++; + + if (begin == end) + return begin; + + // last written element + I write = begin++; + + // merge unique elements + while (begin != end) + { + if (*begin != *write) + *++write = *begin++; + else + 
begin++; + } + + // past-the-end (write points to live element) + return write + 1; + } + + template PUGI__FN void insertion_sort(T* begin, T* end, const Pred& pred) + { + if (begin == end) + return; + + for (T* it = begin + 1; it != end; ++it) + { + T val = *it; + T* hole = it; + + // move hole backwards + while (hole > begin && pred(val, *(hole - 1))) + { + *hole = *(hole - 1); + hole--; + } + + // fill hole with element + *hole = val; + } + } + + template inline I median3(I first, I middle, I last, const Pred& pred) + { + if (pred(*middle, *first)) + swap(middle, first); + if (pred(*last, *middle)) + swap(last, middle); + if (pred(*middle, *first)) + swap(middle, first); + + return middle; + } + + template PUGI__FN void partition3(T* begin, T* end, T pivot, const Pred& pred, T** out_eqbeg, T** out_eqend) + { + // invariant: array is split into 4 groups: = < ? > (each variable denotes the boundary between the groups) + T* eq = begin; + T* lt = begin; + T* gt = end; + + while (lt < gt) + { + if (pred(*lt, pivot)) + lt++; + else if (*lt == pivot) + swap(*eq++, *lt++); + else + swap(*lt, *--gt); + } + + // we now have just 4 groups: = < >; move equal elements to the middle + T* eqbeg = gt; + + for (T* it = begin; it != eq; ++it) + swap(*it, *--eqbeg); + + *out_eqbeg = eqbeg; + *out_eqend = gt; + } + + template PUGI__FN void sort(I begin, I end, const Pred& pred) + { + // sort large chunks + while (end - begin > 16) + { + // find median element + I middle = begin + (end - begin) / 2; + I median = median3(begin, middle, end - 1, pred); + + // partition in three chunks (< = >) + I eqbeg, eqend; + partition3(begin, end, *median, pred, &eqbeg, &eqend); + + // loop on larger half + if (eqbeg - begin > end - eqend) + { + sort(eqend, end, pred); + end = eqbeg; + } + else + { + sort(begin, eqbeg, pred); + begin = eqend; + } + } + + // insertion sort small chunk + insertion_sort(begin, end, pred); + } + + PUGI__FN bool hash_insert(const void** table, size_t size, const void* 
key) + { + assert(key); + + unsigned int h = static_cast(reinterpret_cast(key)); + + // MurmurHash3 32-bit finalizer + h ^= h >> 16; + h *= 0x85ebca6bu; + h ^= h >> 13; + h *= 0xc2b2ae35u; + h ^= h >> 16; + + size_t hashmod = size - 1; + size_t bucket = h & hashmod; + + for (size_t probe = 0; probe <= hashmod; ++probe) + { + if (table[bucket] == 0) + { + table[bucket] = key; + return true; + } + + if (table[bucket] == key) + return false; + + // hash collision, quadratic probing + bucket = (bucket + probe + 1) & hashmod; + } + + assert(false && "Hash table is full"); // unreachable + return false; + } +PUGI__NS_END + +// Allocator used for AST and evaluation stacks +PUGI__NS_BEGIN + static const size_t xpath_memory_page_size = + #ifdef PUGIXML_MEMORY_XPATH_PAGE_SIZE + PUGIXML_MEMORY_XPATH_PAGE_SIZE + #else + 4096 + #endif + ; + + static const uintptr_t xpath_memory_block_alignment = sizeof(double) > sizeof(void*) ? sizeof(double) : sizeof(void*); + + struct xpath_memory_block + { + xpath_memory_block* next; + size_t capacity; + + union + { + char data[xpath_memory_page_size]; + double alignment; + }; + }; + + struct xpath_allocator + { + xpath_memory_block* _root; + size_t _root_size; + bool* _error; + + xpath_allocator(xpath_memory_block* root, bool* error = 0): _root(root), _root_size(0), _error(error) + { + } + + void* allocate(size_t size) + { + // round size up to block alignment boundary + size = (size + xpath_memory_block_alignment - 1) & ~(xpath_memory_block_alignment - 1); + + if (_root_size + size <= _root->capacity) + { + void* buf = &_root->data[0] + _root_size; + _root_size += size; + return buf; + } + else + { + // make sure we have at least 1/4th of the page free after allocation to satisfy subsequent allocation requests + size_t block_capacity_base = sizeof(_root->data); + size_t block_capacity_req = size + block_capacity_base / 4; + size_t block_capacity = (block_capacity_base > block_capacity_req) ? 
block_capacity_base : block_capacity_req; + + size_t block_size = block_capacity + offsetof(xpath_memory_block, data); + + xpath_memory_block* block = static_cast(xml_memory::allocate(block_size)); + if (!block) + { + if (_error) *_error = true; + return 0; + } + + block->next = _root; + block->capacity = block_capacity; + + _root = block; + _root_size = size; + + return block->data; + } + } + + void* reallocate(void* ptr, size_t old_size, size_t new_size) + { + // round size up to block alignment boundary + old_size = (old_size + xpath_memory_block_alignment - 1) & ~(xpath_memory_block_alignment - 1); + new_size = (new_size + xpath_memory_block_alignment - 1) & ~(xpath_memory_block_alignment - 1); + + // we can only reallocate the last object + assert(ptr == 0 || static_cast(ptr) + old_size == &_root->data[0] + _root_size); + + // try to reallocate the object inplace + if (ptr && _root_size - old_size + new_size <= _root->capacity) + { + _root_size = _root_size - old_size + new_size; + return ptr; + } + + // allocate a new block + void* result = allocate(new_size); + if (!result) return 0; + + // we have a new block + if (ptr) + { + // copy old data (we only support growing) + assert(new_size >= old_size); + memcpy(result, ptr, old_size); + + // free the previous page if it had no other objects + assert(_root->data == result); + assert(_root->next); + + if (_root->next->data == ptr) + { + // deallocate the whole page, unless it was the first one + xpath_memory_block* next = _root->next->next; + + if (next) + { + xml_memory::deallocate(_root->next); + _root->next = next; + } + } + } + + return result; + } + + void revert(const xpath_allocator& state) + { + // free all new pages + xpath_memory_block* cur = _root; + + while (cur != state._root) + { + xpath_memory_block* next = cur->next; + + xml_memory::deallocate(cur); + + cur = next; + } + + // restore state + _root = state._root; + _root_size = state._root_size; + } + + void release() + { + xpath_memory_block* cur 
= _root; + assert(cur); + + while (cur->next) + { + xpath_memory_block* next = cur->next; + + xml_memory::deallocate(cur); + + cur = next; + } + } + }; + + struct xpath_allocator_capture + { + xpath_allocator_capture(xpath_allocator* alloc): _target(alloc), _state(*alloc) + { + } + + ~xpath_allocator_capture() + { + _target->revert(_state); + } + + xpath_allocator* _target; + xpath_allocator _state; + }; + + struct xpath_stack + { + xpath_allocator* result; + xpath_allocator* temp; + }; + + struct xpath_stack_data + { + xpath_memory_block blocks[2]; + xpath_allocator result; + xpath_allocator temp; + xpath_stack stack; + bool oom; + + xpath_stack_data(): result(blocks + 0, &oom), temp(blocks + 1, &oom), oom(false) + { + blocks[0].next = blocks[1].next = 0; + blocks[0].capacity = blocks[1].capacity = sizeof(blocks[0].data); + + stack.result = &result; + stack.temp = &temp; + } + + ~xpath_stack_data() + { + result.release(); + temp.release(); + } + }; +PUGI__NS_END + +// String class +PUGI__NS_BEGIN + class xpath_string + { + const char_t* _buffer; + bool _uses_heap; + size_t _length_heap; + + static char_t* duplicate_string(const char_t* string, size_t length, xpath_allocator* alloc) + { + char_t* result = static_cast(alloc->allocate((length + 1) * sizeof(char_t))); + if (!result) return 0; + + memcpy(result, string, length * sizeof(char_t)); + result[length] = 0; + + return result; + } + + xpath_string(const char_t* buffer, bool uses_heap_, size_t length_heap): _buffer(buffer), _uses_heap(uses_heap_), _length_heap(length_heap) + { + } + + public: + static xpath_string from_const(const char_t* str) + { + return xpath_string(str, false, 0); + } + + static xpath_string from_heap_preallocated(const char_t* begin, const char_t* end) + { + assert(begin <= end && *end == 0); + + return xpath_string(begin, true, static_cast(end - begin)); + } + + static xpath_string from_heap(const char_t* begin, const char_t* end, xpath_allocator* alloc) + { + assert(begin <= end); + + if 
(begin == end) + return xpath_string(); + + size_t length = static_cast(end - begin); + const char_t* data = duplicate_string(begin, length, alloc); + + return data ? xpath_string(data, true, length) : xpath_string(); + } + + xpath_string(): _buffer(PUGIXML_TEXT("")), _uses_heap(false), _length_heap(0) + { + } + + void append(const xpath_string& o, xpath_allocator* alloc) + { + // skip empty sources + if (!*o._buffer) return; + + // fast append for constant empty target and constant source + if (!*_buffer && !_uses_heap && !o._uses_heap) + { + _buffer = o._buffer; + } + else + { + // need to make heap copy + size_t target_length = length(); + size_t source_length = o.length(); + size_t result_length = target_length + source_length; + + // allocate new buffer + char_t* result = static_cast(alloc->reallocate(_uses_heap ? const_cast(_buffer) : 0, (target_length + 1) * sizeof(char_t), (result_length + 1) * sizeof(char_t))); + if (!result) return; + + // append first string to the new buffer in case there was no reallocation + if (!_uses_heap) memcpy(result, _buffer, target_length * sizeof(char_t)); + + // append second string to the new buffer + memcpy(result + target_length, o._buffer, source_length * sizeof(char_t)); + result[result_length] = 0; + + // finalize + _buffer = result; + _uses_heap = true; + _length_heap = result_length; + } + } + + const char_t* c_str() const + { + return _buffer; + } + + size_t length() const + { + return _uses_heap ? 
_length_heap : strlength(_buffer); + } + + char_t* data(xpath_allocator* alloc) + { + // make private heap copy + if (!_uses_heap) + { + size_t length_ = strlength(_buffer); + const char_t* data_ = duplicate_string(_buffer, length_, alloc); + + if (!data_) return 0; + + _buffer = data_; + _uses_heap = true; + _length_heap = length_; + } + + return const_cast(_buffer); + } + + bool empty() const + { + return *_buffer == 0; + } + + bool operator==(const xpath_string& o) const + { + return strequal(_buffer, o._buffer); + } + + bool operator!=(const xpath_string& o) const + { + return !strequal(_buffer, o._buffer); + } + + bool uses_heap() const + { + return _uses_heap; + } + }; +PUGI__NS_END + +PUGI__NS_BEGIN + PUGI__FN bool starts_with(const char_t* string, const char_t* pattern) + { + while (*pattern && *string == *pattern) + { + string++; + pattern++; + } + + return *pattern == 0; + } + + PUGI__FN const char_t* find_char(const char_t* s, char_t c) + { + #ifdef PUGIXML_WCHAR_MODE + return wcschr(s, c); + #else + return strchr(s, c); + #endif + } + + PUGI__FN const char_t* find_substring(const char_t* s, const char_t* p) + { + #ifdef PUGIXML_WCHAR_MODE + // MSVC6 wcsstr bug workaround (if s is empty it always returns 0) + return (*p == 0) ? s : wcsstr(s, p); + #else + return strstr(s, p); + #endif + } + + // Converts symbol to lower case, if it is an ASCII one + PUGI__FN char_t tolower_ascii(char_t ch) + { + return static_cast(ch - 'A') < 26 ? 
static_cast(ch | ' ') : ch; + } + + PUGI__FN xpath_string string_value(const xpath_node& na, xpath_allocator* alloc) + { + if (na.attribute()) + return xpath_string::from_const(na.attribute().value()); + else + { + xml_node n = na.node(); + + switch (n.type()) + { + case node_pcdata: + case node_cdata: + case node_comment: + case node_pi: + return xpath_string::from_const(n.value()); + + case node_document: + case node_element: + { + xpath_string result; + + // element nodes can have value if parse_embed_pcdata was used + if (n.value()[0]) + result.append(xpath_string::from_const(n.value()), alloc); + + xml_node cur = n.first_child(); + + while (cur && cur != n) + { + if (cur.type() == node_pcdata || cur.type() == node_cdata) + result.append(xpath_string::from_const(cur.value()), alloc); + + if (cur.first_child()) + cur = cur.first_child(); + else if (cur.next_sibling()) + cur = cur.next_sibling(); + else + { + while (!cur.next_sibling() && cur != n) + cur = cur.parent(); + + if (cur != n) cur = cur.next_sibling(); + } + } + + return result; + } + + default: + return xpath_string(); + } + } + } + + PUGI__FN bool node_is_before_sibling(xml_node_struct* ln, xml_node_struct* rn) + { + assert(ln->parent == rn->parent); + + // there is no common ancestor (the shared parent is null), nodes are from different documents + if (!ln->parent) return ln < rn; + + // determine sibling order + xml_node_struct* ls = ln; + xml_node_struct* rs = rn; + + while (ls && rs) + { + if (ls == rn) return true; + if (rs == ln) return false; + + ls = ls->next_sibling; + rs = rs->next_sibling; + } + + // if rn sibling chain ended ln must be before rn + return !rs; + } + + PUGI__FN bool node_is_before(xml_node_struct* ln, xml_node_struct* rn) + { + // find common ancestor at the same depth, if any + xml_node_struct* lp = ln; + xml_node_struct* rp = rn; + + while (lp && rp && lp->parent != rp->parent) + { + lp = lp->parent; + rp = rp->parent; + } + + // parents are the same! 
+ if (lp && rp) return node_is_before_sibling(lp, rp); + + // nodes are at different depths, need to normalize heights + bool left_higher = !lp; + + while (lp) + { + lp = lp->parent; + ln = ln->parent; + } + + while (rp) + { + rp = rp->parent; + rn = rn->parent; + } + + // one node is the ancestor of the other + if (ln == rn) return left_higher; + + // find common ancestor... again + while (ln->parent != rn->parent) + { + ln = ln->parent; + rn = rn->parent; + } + + return node_is_before_sibling(ln, rn); + } + + PUGI__FN bool node_is_ancestor(xml_node_struct* parent, xml_node_struct* node) + { + while (node && node != parent) node = node->parent; + + return parent && node == parent; + } + + PUGI__FN const void* document_buffer_order(const xpath_node& xnode) + { + xml_node_struct* node = xnode.node().internal_object(); + + if (node) + { + if ((get_document(node).header & xml_memory_page_contents_shared_mask) == 0) + { + if (node->name && (node->header & impl::xml_memory_page_name_allocated_or_shared_mask) == 0) return node->name; + if (node->value && (node->header & impl::xml_memory_page_value_allocated_or_shared_mask) == 0) return node->value; + } + + return 0; + } + + xml_attribute_struct* attr = xnode.attribute().internal_object(); + + if (attr) + { + if ((get_document(attr).header & xml_memory_page_contents_shared_mask) == 0) + { + if ((attr->header & impl::xml_memory_page_name_allocated_or_shared_mask) == 0) return attr->name; + if ((attr->header & impl::xml_memory_page_value_allocated_or_shared_mask) == 0) return attr->value; + } + + return 0; + } + + return 0; + } + + struct document_order_comparator + { + bool operator()(const xpath_node& lhs, const xpath_node& rhs) const + { + // optimized document order based check + const void* lo = document_buffer_order(lhs); + const void* ro = document_buffer_order(rhs); + + if (lo && ro) return lo < ro; + + // slow comparison + xml_node ln = lhs.node(), rn = rhs.node(); + + // compare attributes + if (lhs.attribute() && 
rhs.attribute()) + { + // shared parent + if (lhs.parent() == rhs.parent()) + { + // determine sibling order + for (xml_attribute a = lhs.attribute(); a; a = a.next_attribute()) + if (a == rhs.attribute()) + return true; + + return false; + } + + // compare attribute parents + ln = lhs.parent(); + rn = rhs.parent(); + } + else if (lhs.attribute()) + { + // attributes go after the parent element + if (lhs.parent() == rhs.node()) return false; + + ln = lhs.parent(); + } + else if (rhs.attribute()) + { + // attributes go after the parent element + if (rhs.parent() == lhs.node()) return true; + + rn = rhs.parent(); + } + + if (ln == rn) return false; + + if (!ln || !rn) return ln < rn; + + return node_is_before(ln.internal_object(), rn.internal_object()); + } + }; + + PUGI__FN double gen_nan() + { + #if defined(__STDC_IEC_559__) || ((FLT_RADIX - 0 == 2) && (FLT_MAX_EXP - 0 == 128) && (FLT_MANT_DIG - 0 == 24)) + PUGI__STATIC_ASSERT(sizeof(float) == sizeof(uint32_t)); + typedef uint32_t UI; // BCC5 workaround + union { float f; UI i; } u; + u.i = 0x7fc00000; + return double(u.f); + #else + // fallback + const volatile double zero = 0.0; + return zero / zero; + #endif + } + + PUGI__FN bool is_nan(double value) + { + #if defined(PUGI__MSVC_CRT_VERSION) || defined(__BORLANDC__) + return !!_isnan(value); + #elif defined(fpclassify) && defined(FP_NAN) + return fpclassify(value) == FP_NAN; + #else + // fallback + const volatile double v = value; + return v != v; + #endif + } + + PUGI__FN const char_t* convert_number_to_string_special(double value) + { + #if defined(PUGI__MSVC_CRT_VERSION) || defined(__BORLANDC__) + if (_finite(value)) return (value == 0) ? PUGIXML_TEXT("0") : 0; + if (_isnan(value)) return PUGIXML_TEXT("NaN"); + return value > 0 ? 
PUGIXML_TEXT("Infinity") : PUGIXML_TEXT("-Infinity"); + #elif defined(fpclassify) && defined(FP_NAN) && defined(FP_INFINITE) && defined(FP_ZERO) + switch (fpclassify(value)) + { + case FP_NAN: + return PUGIXML_TEXT("NaN"); + + case FP_INFINITE: + return value > 0 ? PUGIXML_TEXT("Infinity") : PUGIXML_TEXT("-Infinity"); + + case FP_ZERO: + return PUGIXML_TEXT("0"); + + default: + return 0; + } + #else + // fallback + const volatile double v = value; + + if (v == 0) return PUGIXML_TEXT("0"); + if (v != v) return PUGIXML_TEXT("NaN"); + if (v * 2 == v) return value > 0 ? PUGIXML_TEXT("Infinity") : PUGIXML_TEXT("-Infinity"); + return 0; + #endif + } + + PUGI__FN bool convert_number_to_boolean(double value) + { + return (value != 0 && !is_nan(value)); + } + + PUGI__FN void truncate_zeros(char* begin, char* end) + { + while (begin != end && end[-1] == '0') end--; + + *end = 0; + } + + // gets mantissa digits in the form of 0.xxxxx with 0. implied and the exponent +#if defined(PUGI__MSVC_CRT_VERSION) && PUGI__MSVC_CRT_VERSION >= 1400 + PUGI__FN void convert_number_to_mantissa_exponent(double value, char (&buffer)[32], char** out_mantissa, int* out_exponent) + { + // get base values + int sign, exponent; + _ecvt_s(buffer, sizeof(buffer), value, DBL_DIG + 1, &exponent, &sign); + + // truncate redundant zeros + truncate_zeros(buffer, buffer + strlen(buffer)); + + // fill results + *out_mantissa = buffer; + *out_exponent = exponent; + } +#else + PUGI__FN void convert_number_to_mantissa_exponent(double value, char (&buffer)[32], char** out_mantissa, int* out_exponent) + { + // get a scientific notation value with IEEE DBL_DIG decimals + PUGI__SNPRINTF(buffer, "%.*e", DBL_DIG, value); + + // get the exponent (possibly negative) + char* exponent_string = strchr(buffer, 'e'); + assert(exponent_string); + + int exponent = atoi(exponent_string + 1); + + // extract mantissa string: skip sign + char* mantissa = buffer[0] == '-' ? 
buffer + 1 : buffer; + assert(mantissa[0] != '0' && mantissa[1] == '.'); + + // divide mantissa by 10 to eliminate integer part + mantissa[1] = mantissa[0]; + mantissa++; + exponent++; + + // remove extra mantissa digits and zero-terminate mantissa + truncate_zeros(mantissa, exponent_string); + + // fill results + *out_mantissa = mantissa; + *out_exponent = exponent; + } +#endif + + PUGI__FN xpath_string convert_number_to_string(double value, xpath_allocator* alloc) + { + // try special number conversion + const char_t* special = convert_number_to_string_special(value); + if (special) return xpath_string::from_const(special); + + // get mantissa + exponent form + char mantissa_buffer[32]; + + char* mantissa; + int exponent; + convert_number_to_mantissa_exponent(value, mantissa_buffer, &mantissa, &exponent); + + // allocate a buffer of suitable length for the number + size_t result_size = strlen(mantissa_buffer) + (exponent > 0 ? exponent : -exponent) + 4; + char_t* result = static_cast(alloc->allocate(sizeof(char_t) * result_size)); + if (!result) return xpath_string(); + + // make the number! + char_t* s = result; + + // sign + if (value < 0) *s++ = '-'; + + // integer part + if (exponent <= 0) + { + *s++ = '0'; + } + else + { + while (exponent > 0) + { + assert(*mantissa == 0 || static_cast(*mantissa - '0') <= 9); + *s++ = *mantissa ? 
*mantissa++ : '0'; + exponent--; + } + } + + // fractional part + if (*mantissa) + { + // decimal point + *s++ = '.'; + + // extra zeroes from negative exponent + while (exponent < 0) + { + *s++ = '0'; + exponent++; + } + + // extra mantissa digits + while (*mantissa) + { + assert(static_cast(*mantissa - '0') <= 9); + *s++ = *mantissa++; + } + } + + // zero-terminate + assert(s < result + result_size); + *s = 0; + + return xpath_string::from_heap_preallocated(result, s); + } + + PUGI__FN bool check_string_to_number_format(const char_t* string) + { + // parse leading whitespace + while (PUGI__IS_CHARTYPE(*string, ct_space)) ++string; + + // parse sign + if (*string == '-') ++string; + + if (!*string) return false; + + // if there is no integer part, there should be a decimal part with at least one digit + if (!PUGI__IS_CHARTYPEX(string[0], ctx_digit) && (string[0] != '.' || !PUGI__IS_CHARTYPEX(string[1], ctx_digit))) return false; + + // parse integer part + while (PUGI__IS_CHARTYPEX(*string, ctx_digit)) ++string; + + // parse decimal part + if (*string == '.') + { + ++string; + + while (PUGI__IS_CHARTYPEX(*string, ctx_digit)) ++string; + } + + // parse trailing whitespace + while (PUGI__IS_CHARTYPE(*string, ct_space)) ++string; + + return *string == 0; + } + + PUGI__FN double convert_string_to_number(const char_t* string) + { + // check string format + if (!check_string_to_number_format(string)) return gen_nan(); + + // parse string + #ifdef PUGIXML_WCHAR_MODE + return wcstod(string, 0); + #else + return strtod(string, 0); + #endif + } + + PUGI__FN bool convert_string_to_number_scratch(char_t (&buffer)[32], const char_t* begin, const char_t* end, double* out_result) + { + size_t length = static_cast(end - begin); + char_t* scratch = buffer; + + if (length >= sizeof(buffer) / sizeof(buffer[0])) + { + // need to make dummy on-heap copy + scratch = static_cast(xml_memory::allocate((length + 1) * sizeof(char_t))); + if (!scratch) return false; + } + + // copy string to 
zero-terminated buffer and perform conversion + memcpy(scratch, begin, length * sizeof(char_t)); + scratch[length] = 0; + + *out_result = convert_string_to_number(scratch); + + // free dummy buffer + if (scratch != buffer) xml_memory::deallocate(scratch); + + return true; + } + + PUGI__FN double round_nearest(double value) + { + return floor(value + 0.5); + } + + PUGI__FN double round_nearest_nzero(double value) + { + // same as round_nearest, but returns -0 for [-0.5, -0] + // ceil is used to differentiate between +0 and -0 (we return -0 for [-0.5, -0] and +0 for +0) + return (value >= -0.5 && value <= 0) ? ceil(value) : floor(value + 0.5); + } + + PUGI__FN const char_t* qualified_name(const xpath_node& node) + { + return node.attribute() ? node.attribute().name() : node.node().name(); + } + + PUGI__FN const char_t* local_name(const xpath_node& node) + { + const char_t* name = qualified_name(node); + const char_t* p = find_char(name, ':'); + + return p ? p + 1 : name; + } + + struct namespace_uri_predicate + { + const char_t* prefix; + size_t prefix_length; + + namespace_uri_predicate(const char_t* name) + { + const char_t* pos = find_char(name, ':'); + + prefix = pos ? name : 0; + prefix_length = pos ? static_cast(pos - name) : 0; + } + + bool operator()(xml_attribute a) const + { + const char_t* name = a.name(); + + if (!starts_with(name, PUGIXML_TEXT("xmlns"))) return false; + + return prefix ? 
name[5] == ':' && strequalrange(name + 6, prefix, prefix_length) : name[5] == 0; + } + }; + + PUGI__FN const char_t* namespace_uri(xml_node node) + { + namespace_uri_predicate pred = node.name(); + + xml_node p = node; + + while (p) + { + xml_attribute a = p.find_attribute(pred); + + if (a) return a.value(); + + p = p.parent(); + } + + return PUGIXML_TEXT(""); + } + + PUGI__FN const char_t* namespace_uri(xml_attribute attr, xml_node parent) + { + namespace_uri_predicate pred = attr.name(); + + // Default namespace does not apply to attributes + if (!pred.prefix) return PUGIXML_TEXT(""); + + xml_node p = parent; + + while (p) + { + xml_attribute a = p.find_attribute(pred); + + if (a) return a.value(); + + p = p.parent(); + } + + return PUGIXML_TEXT(""); + } + + PUGI__FN const char_t* namespace_uri(const xpath_node& node) + { + return node.attribute() ? namespace_uri(node.attribute(), node.parent()) : namespace_uri(node.node()); + } + + PUGI__FN char_t* normalize_space(char_t* buffer) + { + char_t* write = buffer; + + for (char_t* it = buffer; *it; ) + { + char_t ch = *it++; + + if (PUGI__IS_CHARTYPE(ch, ct_space)) + { + // replace whitespace sequence with single space + while (PUGI__IS_CHARTYPE(*it, ct_space)) it++; + + // avoid leading spaces + if (write != buffer) *write++ = ' '; + } + else *write++ = ch; + } + + // remove trailing space + if (write != buffer && PUGI__IS_CHARTYPE(write[-1], ct_space)) write--; + + // zero-terminate + *write = 0; + + return write; + } + + PUGI__FN char_t* translate(char_t* buffer, const char_t* from, const char_t* to, size_t to_length) + { + char_t* write = buffer; + + while (*buffer) + { + PUGI__DMC_VOLATILE char_t ch = *buffer++; + + const char_t* pos = find_char(from, ch); + + if (!pos) + *write++ = ch; // do not process + else if (static_cast(pos - from) < to_length) + *write++ = to[pos - from]; // replace + } + + // zero-terminate + *write = 0; + + return write; + } + + PUGI__FN unsigned char* 
translate_table_generate(xpath_allocator* alloc, const char_t* from, const char_t* to) + { + unsigned char table[128] = {0}; + + while (*from) + { + unsigned int fc = static_cast(*from); + unsigned int tc = static_cast(*to); + + if (fc >= 128 || tc >= 128) + return 0; + + // code=128 means "skip character" + if (!table[fc]) + table[fc] = static_cast(tc ? tc : 128); + + from++; + if (tc) to++; + } + + for (int i = 0; i < 128; ++i) + if (!table[i]) + table[i] = static_cast(i); + + void* result = alloc->allocate(sizeof(table)); + if (!result) return 0; + + memcpy(result, table, sizeof(table)); + + return static_cast(result); + } + + PUGI__FN char_t* translate_table(char_t* buffer, const unsigned char* table) + { + char_t* write = buffer; + + while (*buffer) + { + char_t ch = *buffer++; + unsigned int index = static_cast(ch); + + if (index < 128) + { + unsigned char code = table[index]; + + // code=128 means "skip character" (table size is 128 so 128 can be a special value) + // this code skips these characters without extra branches + *write = static_cast(code); + write += 1 - (code >> 7); + } + else + { + *write++ = ch; + } + } + + // zero-terminate + *write = 0; + + return write; + } + + inline bool is_xpath_attribute(const char_t* name) + { + return !(starts_with(name, PUGIXML_TEXT("xmlns")) && (name[5] == 0 || name[5] == ':')); + } + + struct xpath_variable_boolean: xpath_variable + { + xpath_variable_boolean(): xpath_variable(xpath_type_boolean), value(false) + { + } + + bool value; + char_t name[1]; + }; + + struct xpath_variable_number: xpath_variable + { + xpath_variable_number(): xpath_variable(xpath_type_number), value(0) + { + } + + double value; + char_t name[1]; + }; + + struct xpath_variable_string: xpath_variable + { + xpath_variable_string(): xpath_variable(xpath_type_string), value(0) + { + } + + ~xpath_variable_string() + { + if (value) xml_memory::deallocate(value); + } + + char_t* value; + char_t name[1]; + }; + + struct xpath_variable_node_set: 
xpath_variable + { + xpath_variable_node_set(): xpath_variable(xpath_type_node_set) + { + } + + xpath_node_set value; + char_t name[1]; + }; + + static const xpath_node_set dummy_node_set; + + PUGI__FN PUGI__UNSIGNED_OVERFLOW unsigned int hash_string(const char_t* str) + { + // Jenkins one-at-a-time hash (http://en.wikipedia.org/wiki/Jenkins_hash_function#one-at-a-time) + unsigned int result = 0; + + while (*str) + { + result += static_cast(*str++); + result += result << 10; + result ^= result >> 6; + } + + result += result << 3; + result ^= result >> 11; + result += result << 15; + + return result; + } + + template PUGI__FN T* new_xpath_variable(const char_t* name) + { + size_t length = strlength(name); + if (length == 0) return 0; // empty variable names are invalid + + // $$ we can't use offsetof(T, name) because T is non-POD, so we just allocate additional length characters + void* memory = xml_memory::allocate(sizeof(T) + length * sizeof(char_t)); + if (!memory) return 0; + + T* result = new (memory) T(); + + memcpy(result->name, name, (length + 1) * sizeof(char_t)); + + return result; + } + + PUGI__FN xpath_variable* new_xpath_variable(xpath_value_type type, const char_t* name) + { + switch (type) + { + case xpath_type_node_set: + return new_xpath_variable(name); + + case xpath_type_number: + return new_xpath_variable(name); + + case xpath_type_string: + return new_xpath_variable(name); + + case xpath_type_boolean: + return new_xpath_variable(name); + + default: + return 0; + } + } + + template PUGI__FN void delete_xpath_variable(T* var) + { + var->~T(); + xml_memory::deallocate(var); + } + + PUGI__FN void delete_xpath_variable(xpath_value_type type, xpath_variable* var) + { + switch (type) + { + case xpath_type_node_set: + delete_xpath_variable(static_cast(var)); + break; + + case xpath_type_number: + delete_xpath_variable(static_cast(var)); + break; + + case xpath_type_string: + delete_xpath_variable(static_cast(var)); + break; + + case xpath_type_boolean: 
+ delete_xpath_variable(static_cast(var)); + break; + + default: + assert(false && "Invalid variable type"); // unreachable + } + } + + PUGI__FN bool copy_xpath_variable(xpath_variable* lhs, const xpath_variable* rhs) + { + switch (rhs->type()) + { + case xpath_type_node_set: + return lhs->set(static_cast(rhs)->value); + + case xpath_type_number: + return lhs->set(static_cast(rhs)->value); + + case xpath_type_string: + return lhs->set(static_cast(rhs)->value); + + case xpath_type_boolean: + return lhs->set(static_cast(rhs)->value); + + default: + assert(false && "Invalid variable type"); // unreachable + return false; + } + } + + PUGI__FN bool get_variable_scratch(char_t (&buffer)[32], xpath_variable_set* set, const char_t* begin, const char_t* end, xpath_variable** out_result) + { + size_t length = static_cast(end - begin); + char_t* scratch = buffer; + + if (length >= sizeof(buffer) / sizeof(buffer[0])) + { + // need to make dummy on-heap copy + scratch = static_cast(xml_memory::allocate((length + 1) * sizeof(char_t))); + if (!scratch) return false; + } + + // copy string to zero-terminated buffer and perform lookup + memcpy(scratch, begin, length * sizeof(char_t)); + scratch[length] = 0; + + *out_result = set->get(scratch); + + // free dummy buffer + if (scratch != buffer) xml_memory::deallocate(scratch); + + return true; + } +PUGI__NS_END + +// Internal node set class +PUGI__NS_BEGIN + PUGI__FN xpath_node_set::type_t xpath_get_order(const xpath_node* begin, const xpath_node* end) + { + if (end - begin < 2) + return xpath_node_set::type_sorted; + + document_order_comparator cmp; + + bool first = cmp(begin[0], begin[1]); + + for (const xpath_node* it = begin + 1; it + 1 < end; ++it) + if (cmp(it[0], it[1]) != first) + return xpath_node_set::type_unsorted; + + return first ? 
xpath_node_set::type_sorted : xpath_node_set::type_sorted_reverse; + } + + PUGI__FN xpath_node_set::type_t xpath_sort(xpath_node* begin, xpath_node* end, xpath_node_set::type_t type, bool rev) + { + xpath_node_set::type_t order = rev ? xpath_node_set::type_sorted_reverse : xpath_node_set::type_sorted; + + if (type == xpath_node_set::type_unsorted) + { + xpath_node_set::type_t sorted = xpath_get_order(begin, end); + + if (sorted == xpath_node_set::type_unsorted) + { + sort(begin, end, document_order_comparator()); + + type = xpath_node_set::type_sorted; + } + else + type = sorted; + } + + if (type != order) reverse(begin, end); + + return order; + } + + PUGI__FN xpath_node xpath_first(const xpath_node* begin, const xpath_node* end, xpath_node_set::type_t type) + { + if (begin == end) return xpath_node(); + + switch (type) + { + case xpath_node_set::type_sorted: + return *begin; + + case xpath_node_set::type_sorted_reverse: + return *(end - 1); + + case xpath_node_set::type_unsorted: + return *min_element(begin, end, document_order_comparator()); + + default: + assert(false && "Invalid node set type"); // unreachable + return xpath_node(); + } + } + + class xpath_node_set_raw + { + xpath_node_set::type_t _type; + + xpath_node* _begin; + xpath_node* _end; + xpath_node* _eos; + + public: + xpath_node_set_raw(): _type(xpath_node_set::type_unsorted), _begin(0), _end(0), _eos(0) + { + } + + xpath_node* begin() const + { + return _begin; + } + + xpath_node* end() const + { + return _end; + } + + bool empty() const + { + return _begin == _end; + } + + size_t size() const + { + return static_cast(_end - _begin); + } + + xpath_node first() const + { + return xpath_first(_begin, _end, _type); + } + + void push_back_grow(const xpath_node& node, xpath_allocator* alloc); + + void push_back(const xpath_node& node, xpath_allocator* alloc) + { + if (_end != _eos) + *_end++ = node; + else + push_back_grow(node, alloc); + } + + void append(const xpath_node* begin_, const xpath_node* 
end_, xpath_allocator* alloc) + { + if (begin_ == end_) return; + + size_t size_ = static_cast(_end - _begin); + size_t capacity = static_cast(_eos - _begin); + size_t count = static_cast(end_ - begin_); + + if (size_ + count > capacity) + { + // reallocate the old array or allocate a new one + xpath_node* data = static_cast(alloc->reallocate(_begin, capacity * sizeof(xpath_node), (size_ + count) * sizeof(xpath_node))); + if (!data) return; + + // finalize + _begin = data; + _end = data + size_; + _eos = data + size_ + count; + } + + memcpy(_end, begin_, count * sizeof(xpath_node)); + _end += count; + } + + void sort_do() + { + _type = xpath_sort(_begin, _end, _type, false); + } + + void truncate(xpath_node* pos) + { + assert(_begin <= pos && pos <= _end); + + _end = pos; + } + + void remove_duplicates(xpath_allocator* alloc) + { + if (_type == xpath_node_set::type_unsorted && _end - _begin > 2) + { + xpath_allocator_capture cr(alloc); + + size_t size_ = static_cast(_end - _begin); + + size_t hash_size = 1; + while (hash_size < size_ + size_ / 2) hash_size *= 2; + + const void** hash_data = static_cast(alloc->allocate(hash_size * sizeof(void**))); + if (!hash_data) return; + + memset(hash_data, 0, hash_size * sizeof(const void**)); + + xpath_node* write = _begin; + + for (xpath_node* it = _begin; it != _end; ++it) + { + const void* attr = it->attribute().internal_object(); + const void* node = it->node().internal_object(); + const void* key = attr ? 
attr : node; + + if (key && hash_insert(hash_data, hash_size, key)) + { + *write++ = *it; + } + } + + _end = write; + } + else + { + _end = unique(_begin, _end); + } + } + + xpath_node_set::type_t type() const + { + return _type; + } + + void set_type(xpath_node_set::type_t value) + { + _type = value; + } + }; + + PUGI__FN_NO_INLINE void xpath_node_set_raw::push_back_grow(const xpath_node& node, xpath_allocator* alloc) + { + size_t capacity = static_cast(_eos - _begin); + + // get new capacity (1.5x rule) + size_t new_capacity = capacity + capacity / 2 + 1; + + // reallocate the old array or allocate a new one + xpath_node* data = static_cast(alloc->reallocate(_begin, capacity * sizeof(xpath_node), new_capacity * sizeof(xpath_node))); + if (!data) return; + + // finalize + _begin = data; + _end = data + capacity; + _eos = data + new_capacity; + + // push + *_end++ = node; + } +PUGI__NS_END + +PUGI__NS_BEGIN + struct xpath_context + { + xpath_node n; + size_t position, size; + + xpath_context(const xpath_node& n_, size_t position_, size_t size_): n(n_), position(position_), size(size_) + { + } + }; + + enum lexeme_t + { + lex_none = 0, + lex_equal, + lex_not_equal, + lex_less, + lex_greater, + lex_less_or_equal, + lex_greater_or_equal, + lex_plus, + lex_minus, + lex_multiply, + lex_union, + lex_var_ref, + lex_open_brace, + lex_close_brace, + lex_quoted_string, + lex_number, + lex_slash, + lex_double_slash, + lex_open_square_brace, + lex_close_square_brace, + lex_string, + lex_comma, + lex_axis_attribute, + lex_dot, + lex_double_dot, + lex_double_colon, + lex_eof + }; + + struct xpath_lexer_string + { + const char_t* begin; + const char_t* end; + + xpath_lexer_string(): begin(0), end(0) + { + } + + bool operator==(const char_t* other) const + { + size_t length = static_cast(end - begin); + + return strequalrange(other, begin, length); + } + }; + + class xpath_lexer + { + const char_t* _cur; + const char_t* _cur_lexeme_pos; + xpath_lexer_string _cur_lexeme_contents; + 
+ lexeme_t _cur_lexeme; + + public: + explicit xpath_lexer(const char_t* query): _cur(query) + { + next(); + } + + const char_t* state() const + { + return _cur; + } + + void next() + { + const char_t* cur = _cur; + + while (PUGI__IS_CHARTYPE(*cur, ct_space)) ++cur; + + // save lexeme position for error reporting + _cur_lexeme_pos = cur; + + switch (*cur) + { + case 0: + _cur_lexeme = lex_eof; + break; + + case '>': + if (*(cur+1) == '=') + { + cur += 2; + _cur_lexeme = lex_greater_or_equal; + } + else + { + cur += 1; + _cur_lexeme = lex_greater; + } + break; + + case '<': + if (*(cur+1) == '=') + { + cur += 2; + _cur_lexeme = lex_less_or_equal; + } + else + { + cur += 1; + _cur_lexeme = lex_less; + } + break; + + case '!': + if (*(cur+1) == '=') + { + cur += 2; + _cur_lexeme = lex_not_equal; + } + else + { + _cur_lexeme = lex_none; + } + break; + + case '=': + cur += 1; + _cur_lexeme = lex_equal; + + break; + + case '+': + cur += 1; + _cur_lexeme = lex_plus; + + break; + + case '-': + cur += 1; + _cur_lexeme = lex_minus; + + break; + + case '*': + cur += 1; + _cur_lexeme = lex_multiply; + + break; + + case '|': + cur += 1; + _cur_lexeme = lex_union; + + break; + + case '$': + cur += 1; + + if (PUGI__IS_CHARTYPEX(*cur, ctx_start_symbol)) + { + _cur_lexeme_contents.begin = cur; + + while (PUGI__IS_CHARTYPEX(*cur, ctx_symbol)) cur++; + + if (cur[0] == ':' && PUGI__IS_CHARTYPEX(cur[1], ctx_symbol)) // qname + { + cur++; // : + + while (PUGI__IS_CHARTYPEX(*cur, ctx_symbol)) cur++; + } + + _cur_lexeme_contents.end = cur; + + _cur_lexeme = lex_var_ref; + } + else + { + _cur_lexeme = lex_none; + } + + break; + + case '(': + cur += 1; + _cur_lexeme = lex_open_brace; + + break; + + case ')': + cur += 1; + _cur_lexeme = lex_close_brace; + + break; + + case '[': + cur += 1; + _cur_lexeme = lex_open_square_brace; + + break; + + case ']': + cur += 1; + _cur_lexeme = lex_close_square_brace; + + break; + + case ',': + cur += 1; + _cur_lexeme = lex_comma; + + break; + + case '/': 
+ if (*(cur+1) == '/') + { + cur += 2; + _cur_lexeme = lex_double_slash; + } + else + { + cur += 1; + _cur_lexeme = lex_slash; + } + break; + + case '.': + if (*(cur+1) == '.') + { + cur += 2; + _cur_lexeme = lex_double_dot; + } + else if (PUGI__IS_CHARTYPEX(*(cur+1), ctx_digit)) + { + _cur_lexeme_contents.begin = cur; // . + + ++cur; + + while (PUGI__IS_CHARTYPEX(*cur, ctx_digit)) cur++; + + _cur_lexeme_contents.end = cur; + + _cur_lexeme = lex_number; + } + else + { + cur += 1; + _cur_lexeme = lex_dot; + } + break; + + case '@': + cur += 1; + _cur_lexeme = lex_axis_attribute; + + break; + + case '"': + case '\'': + { + char_t terminator = *cur; + + ++cur; + + _cur_lexeme_contents.begin = cur; + while (*cur && *cur != terminator) cur++; + _cur_lexeme_contents.end = cur; + + if (!*cur) + _cur_lexeme = lex_none; + else + { + cur += 1; + _cur_lexeme = lex_quoted_string; + } + + break; + } + + case ':': + if (*(cur+1) == ':') + { + cur += 2; + _cur_lexeme = lex_double_colon; + } + else + { + _cur_lexeme = lex_none; + } + break; + + default: + if (PUGI__IS_CHARTYPEX(*cur, ctx_digit)) + { + _cur_lexeme_contents.begin = cur; + + while (PUGI__IS_CHARTYPEX(*cur, ctx_digit)) cur++; + + if (*cur == '.') + { + cur++; + + while (PUGI__IS_CHARTYPEX(*cur, ctx_digit)) cur++; + } + + _cur_lexeme_contents.end = cur; + + _cur_lexeme = lex_number; + } + else if (PUGI__IS_CHARTYPEX(*cur, ctx_start_symbol)) + { + _cur_lexeme_contents.begin = cur; + + while (PUGI__IS_CHARTYPEX(*cur, ctx_symbol)) cur++; + + if (cur[0] == ':') + { + if (cur[1] == '*') // namespace test ncname:* + { + cur += 2; // :* + } + else if (PUGI__IS_CHARTYPEX(cur[1], ctx_symbol)) // namespace test qname + { + cur++; // : + + while (PUGI__IS_CHARTYPEX(*cur, ctx_symbol)) cur++; + } + } + + _cur_lexeme_contents.end = cur; + + _cur_lexeme = lex_string; + } + else + { + _cur_lexeme = lex_none; + } + } + + _cur = cur; + } + + lexeme_t current() const + { + return _cur_lexeme; + } + + const char_t* current_pos() const + { 
+ return _cur_lexeme_pos; + } + + const xpath_lexer_string& contents() const + { + assert(_cur_lexeme == lex_var_ref || _cur_lexeme == lex_number || _cur_lexeme == lex_string || _cur_lexeme == lex_quoted_string); + + return _cur_lexeme_contents; + } + }; + + enum ast_type_t + { + ast_unknown, + ast_op_or, // left or right + ast_op_and, // left and right + ast_op_equal, // left = right + ast_op_not_equal, // left != right + ast_op_less, // left < right + ast_op_greater, // left > right + ast_op_less_or_equal, // left <= right + ast_op_greater_or_equal, // left >= right + ast_op_add, // left + right + ast_op_subtract, // left - right + ast_op_multiply, // left * right + ast_op_divide, // left / right + ast_op_mod, // left % right + ast_op_negate, // left - right + ast_op_union, // left | right + ast_predicate, // apply predicate to set; next points to next predicate + ast_filter, // select * from left where right + ast_string_constant, // string constant + ast_number_constant, // number constant + ast_variable, // variable + ast_func_last, // last() + ast_func_position, // position() + ast_func_count, // count(left) + ast_func_id, // id(left) + ast_func_local_name_0, // local-name() + ast_func_local_name_1, // local-name(left) + ast_func_namespace_uri_0, // namespace-uri() + ast_func_namespace_uri_1, // namespace-uri(left) + ast_func_name_0, // name() + ast_func_name_1, // name(left) + ast_func_string_0, // string() + ast_func_string_1, // string(left) + ast_func_concat, // concat(left, right, siblings) + ast_func_starts_with, // starts_with(left, right) + ast_func_contains, // contains(left, right) + ast_func_substring_before, // substring-before(left, right) + ast_func_substring_after, // substring-after(left, right) + ast_func_substring_2, // substring(left, right) + ast_func_substring_3, // substring(left, right, third) + ast_func_string_length_0, // string-length() + ast_func_string_length_1, // string-length(left) + ast_func_normalize_space_0, // 
normalize-space() + ast_func_normalize_space_1, // normalize-space(left) + ast_func_translate, // translate(left, right, third) + ast_func_boolean, // boolean(left) + ast_func_not, // not(left) + ast_func_true, // true() + ast_func_false, // false() + ast_func_lang, // lang(left) + ast_func_number_0, // number() + ast_func_number_1, // number(left) + ast_func_sum, // sum(left) + ast_func_floor, // floor(left) + ast_func_ceiling, // ceiling(left) + ast_func_round, // round(left) + ast_step, // process set left with step + ast_step_root, // select root node + + ast_opt_translate_table, // translate(left, right, third) where right/third are constants + ast_opt_compare_attribute // @name = 'string' + }; + + enum axis_t + { + axis_ancestor, + axis_ancestor_or_self, + axis_attribute, + axis_child, + axis_descendant, + axis_descendant_or_self, + axis_following, + axis_following_sibling, + axis_namespace, + axis_parent, + axis_preceding, + axis_preceding_sibling, + axis_self + }; + + enum nodetest_t + { + nodetest_none, + nodetest_name, + nodetest_type_node, + nodetest_type_comment, + nodetest_type_pi, + nodetest_type_text, + nodetest_pi, + nodetest_all, + nodetest_all_in_namespace + }; + + enum predicate_t + { + predicate_default, + predicate_posinv, + predicate_constant, + predicate_constant_one + }; + + enum nodeset_eval_t + { + nodeset_eval_all, + nodeset_eval_any, + nodeset_eval_first + }; + + template struct axis_to_type + { + static const axis_t axis; + }; + + template const axis_t axis_to_type::axis = N; + + class xpath_ast_node + { + private: + // node type + char _type; + char _rettype; + + // for ast_step + char _axis; + + // for ast_step/ast_predicate/ast_filter + char _test; + + // tree node structure + xpath_ast_node* _left; + xpath_ast_node* _right; + xpath_ast_node* _next; + + union + { + // value for ast_string_constant + const char_t* string; + // value for ast_number_constant + double number; + // variable for ast_variable + xpath_variable* variable; + 
// node test for ast_step (node name/namespace/node type/pi target) + const char_t* nodetest; + // table for ast_opt_translate_table + const unsigned char* table; + } _data; + + xpath_ast_node(const xpath_ast_node&); + xpath_ast_node& operator=(const xpath_ast_node&); + + template static bool compare_eq(xpath_ast_node* lhs, xpath_ast_node* rhs, const xpath_context& c, const xpath_stack& stack, const Comp& comp) + { + xpath_value_type lt = lhs->rettype(), rt = rhs->rettype(); + + if (lt != xpath_type_node_set && rt != xpath_type_node_set) + { + if (lt == xpath_type_boolean || rt == xpath_type_boolean) + return comp(lhs->eval_boolean(c, stack), rhs->eval_boolean(c, stack)); + else if (lt == xpath_type_number || rt == xpath_type_number) + return comp(lhs->eval_number(c, stack), rhs->eval_number(c, stack)); + else if (lt == xpath_type_string || rt == xpath_type_string) + { + xpath_allocator_capture cr(stack.result); + + xpath_string ls = lhs->eval_string(c, stack); + xpath_string rs = rhs->eval_string(c, stack); + + return comp(ls, rs); + } + } + else if (lt == xpath_type_node_set && rt == xpath_type_node_set) + { + xpath_allocator_capture cr(stack.result); + + xpath_node_set_raw ls = lhs->eval_node_set(c, stack, nodeset_eval_all); + xpath_node_set_raw rs = rhs->eval_node_set(c, stack, nodeset_eval_all); + + for (const xpath_node* li = ls.begin(); li != ls.end(); ++li) + for (const xpath_node* ri = rs.begin(); ri != rs.end(); ++ri) + { + xpath_allocator_capture cri(stack.result); + + if (comp(string_value(*li, stack.result), string_value(*ri, stack.result))) + return true; + } + + return false; + } + else + { + if (lt == xpath_type_node_set) + { + swap(lhs, rhs); + swap(lt, rt); + } + + if (lt == xpath_type_boolean) + return comp(lhs->eval_boolean(c, stack), rhs->eval_boolean(c, stack)); + else if (lt == xpath_type_number) + { + xpath_allocator_capture cr(stack.result); + + double l = lhs->eval_number(c, stack); + xpath_node_set_raw rs = rhs->eval_node_set(c, stack, 
nodeset_eval_all); + + for (const xpath_node* ri = rs.begin(); ri != rs.end(); ++ri) + { + xpath_allocator_capture cri(stack.result); + + if (comp(l, convert_string_to_number(string_value(*ri, stack.result).c_str()))) + return true; + } + + return false; + } + else if (lt == xpath_type_string) + { + xpath_allocator_capture cr(stack.result); + + xpath_string l = lhs->eval_string(c, stack); + xpath_node_set_raw rs = rhs->eval_node_set(c, stack, nodeset_eval_all); + + for (const xpath_node* ri = rs.begin(); ri != rs.end(); ++ri) + { + xpath_allocator_capture cri(stack.result); + + if (comp(l, string_value(*ri, stack.result))) + return true; + } + + return false; + } + } + + assert(false && "Wrong types"); // unreachable + return false; + } + + static bool eval_once(xpath_node_set::type_t type, nodeset_eval_t eval) + { + return type == xpath_node_set::type_sorted ? eval != nodeset_eval_all : eval == nodeset_eval_any; + } + + template static bool compare_rel(xpath_ast_node* lhs, xpath_ast_node* rhs, const xpath_context& c, const xpath_stack& stack, const Comp& comp) + { + xpath_value_type lt = lhs->rettype(), rt = rhs->rettype(); + + if (lt != xpath_type_node_set && rt != xpath_type_node_set) + return comp(lhs->eval_number(c, stack), rhs->eval_number(c, stack)); + else if (lt == xpath_type_node_set && rt == xpath_type_node_set) + { + xpath_allocator_capture cr(stack.result); + + xpath_node_set_raw ls = lhs->eval_node_set(c, stack, nodeset_eval_all); + xpath_node_set_raw rs = rhs->eval_node_set(c, stack, nodeset_eval_all); + + for (const xpath_node* li = ls.begin(); li != ls.end(); ++li) + { + xpath_allocator_capture cri(stack.result); + + double l = convert_string_to_number(string_value(*li, stack.result).c_str()); + + for (const xpath_node* ri = rs.begin(); ri != rs.end(); ++ri) + { + xpath_allocator_capture crii(stack.result); + + if (comp(l, convert_string_to_number(string_value(*ri, stack.result).c_str()))) + return true; + } + } + + return false; + } + else if (lt 
!= xpath_type_node_set && rt == xpath_type_node_set) + { + xpath_allocator_capture cr(stack.result); + + double l = lhs->eval_number(c, stack); + xpath_node_set_raw rs = rhs->eval_node_set(c, stack, nodeset_eval_all); + + for (const xpath_node* ri = rs.begin(); ri != rs.end(); ++ri) + { + xpath_allocator_capture cri(stack.result); + + if (comp(l, convert_string_to_number(string_value(*ri, stack.result).c_str()))) + return true; + } + + return false; + } + else if (lt == xpath_type_node_set && rt != xpath_type_node_set) + { + xpath_allocator_capture cr(stack.result); + + xpath_node_set_raw ls = lhs->eval_node_set(c, stack, nodeset_eval_all); + double r = rhs->eval_number(c, stack); + + for (const xpath_node* li = ls.begin(); li != ls.end(); ++li) + { + xpath_allocator_capture cri(stack.result); + + if (comp(convert_string_to_number(string_value(*li, stack.result).c_str()), r)) + return true; + } + + return false; + } + else + { + assert(false && "Wrong types"); // unreachable + return false; + } + } + + static void apply_predicate_boolean(xpath_node_set_raw& ns, size_t first, xpath_ast_node* expr, const xpath_stack& stack, bool once) + { + assert(ns.size() >= first); + assert(expr->rettype() != xpath_type_number); + + size_t i = 1; + size_t size = ns.size() - first; + + xpath_node* last = ns.begin() + first; + + // remove_if... or well, sort of + for (xpath_node* it = last; it != ns.end(); ++it, ++i) + { + xpath_context c(*it, i, size); + + if (expr->eval_boolean(c, stack)) + { + *last++ = *it; + + if (once) break; + } + } + + ns.truncate(last); + } + + static void apply_predicate_number(xpath_node_set_raw& ns, size_t first, xpath_ast_node* expr, const xpath_stack& stack, bool once) + { + assert(ns.size() >= first); + assert(expr->rettype() == xpath_type_number); + + size_t i = 1; + size_t size = ns.size() - first; + + xpath_node* last = ns.begin() + first; + + // remove_if... 
or well, sort of + for (xpath_node* it = last; it != ns.end(); ++it, ++i) + { + xpath_context c(*it, i, size); + + if (expr->eval_number(c, stack) == static_cast(i)) + { + *last++ = *it; + + if (once) break; + } + } + + ns.truncate(last); + } + + static void apply_predicate_number_const(xpath_node_set_raw& ns, size_t first, xpath_ast_node* expr, const xpath_stack& stack) + { + assert(ns.size() >= first); + assert(expr->rettype() == xpath_type_number); + + size_t size = ns.size() - first; + + xpath_node* last = ns.begin() + first; + + xpath_context c(xpath_node(), 1, size); + + double er = expr->eval_number(c, stack); + + if (er >= 1.0 && er <= static_cast(size)) + { + size_t eri = static_cast(er); + + if (er == static_cast(eri)) + { + xpath_node r = last[eri - 1]; + + *last++ = r; + } + } + + ns.truncate(last); + } + + void apply_predicate(xpath_node_set_raw& ns, size_t first, const xpath_stack& stack, bool once) + { + if (ns.size() == first) return; + + assert(_type == ast_filter || _type == ast_predicate); + + if (_test == predicate_constant || _test == predicate_constant_one) + apply_predicate_number_const(ns, first, _right, stack); + else if (_right->rettype() == xpath_type_number) + apply_predicate_number(ns, first, _right, stack, once); + else + apply_predicate_boolean(ns, first, _right, stack, once); + } + + void apply_predicates(xpath_node_set_raw& ns, size_t first, const xpath_stack& stack, nodeset_eval_t eval) + { + if (ns.size() == first) return; + + bool last_once = eval_once(ns.type(), eval); + + for (xpath_ast_node* pred = _right; pred; pred = pred->_next) + pred->apply_predicate(ns, first, stack, !pred->_next && last_once); + } + + bool step_push(xpath_node_set_raw& ns, xml_attribute_struct* a, xml_node_struct* parent, xpath_allocator* alloc) + { + assert(a); + + const char_t* name = a->name ? 
a->name + 0 : PUGIXML_TEXT(""); + + switch (_test) + { + case nodetest_name: + if (strequal(name, _data.nodetest) && is_xpath_attribute(name)) + { + ns.push_back(xpath_node(xml_attribute(a), xml_node(parent)), alloc); + return true; + } + break; + + case nodetest_type_node: + case nodetest_all: + if (is_xpath_attribute(name)) + { + ns.push_back(xpath_node(xml_attribute(a), xml_node(parent)), alloc); + return true; + } + break; + + case nodetest_all_in_namespace: + if (starts_with(name, _data.nodetest) && is_xpath_attribute(name)) + { + ns.push_back(xpath_node(xml_attribute(a), xml_node(parent)), alloc); + return true; + } + break; + + default: + ; + } + + return false; + } + + bool step_push(xpath_node_set_raw& ns, xml_node_struct* n, xpath_allocator* alloc) + { + assert(n); + + xml_node_type type = PUGI__NODETYPE(n); + + switch (_test) + { + case nodetest_name: + if (type == node_element && n->name && strequal(n->name, _data.nodetest)) + { + ns.push_back(xml_node(n), alloc); + return true; + } + break; + + case nodetest_type_node: + ns.push_back(xml_node(n), alloc); + return true; + + case nodetest_type_comment: + if (type == node_comment) + { + ns.push_back(xml_node(n), alloc); + return true; + } + break; + + case nodetest_type_text: + if (type == node_pcdata || type == node_cdata) + { + ns.push_back(xml_node(n), alloc); + return true; + } + break; + + case nodetest_type_pi: + if (type == node_pi) + { + ns.push_back(xml_node(n), alloc); + return true; + } + break; + + case nodetest_pi: + if (type == node_pi && n->name && strequal(n->name, _data.nodetest)) + { + ns.push_back(xml_node(n), alloc); + return true; + } + break; + + case nodetest_all: + if (type == node_element) + { + ns.push_back(xml_node(n), alloc); + return true; + } + break; + + case nodetest_all_in_namespace: + if (type == node_element && n->name && starts_with(n->name, _data.nodetest)) + { + ns.push_back(xml_node(n), alloc); + return true; + } + break; + + default: + assert(false && "Unknown 
axis"); // unreachable + } + + return false; + } + + template void step_fill(xpath_node_set_raw& ns, xml_node_struct* n, xpath_allocator* alloc, bool once, T) + { + const axis_t axis = T::axis; + + switch (axis) + { + case axis_attribute: + { + for (xml_attribute_struct* a = n->first_attribute; a; a = a->next_attribute) + if (step_push(ns, a, n, alloc) & once) + return; + + break; + } + + case axis_child: + { + for (xml_node_struct* c = n->first_child; c; c = c->next_sibling) + if (step_push(ns, c, alloc) & once) + return; + + break; + } + + case axis_descendant: + case axis_descendant_or_self: + { + if (axis == axis_descendant_or_self) + if (step_push(ns, n, alloc) & once) + return; + + xml_node_struct* cur = n->first_child; + + while (cur) + { + if (step_push(ns, cur, alloc) & once) + return; + + if (cur->first_child) + cur = cur->first_child; + else + { + while (!cur->next_sibling) + { + cur = cur->parent; + + if (cur == n) return; + } + + cur = cur->next_sibling; + } + } + + break; + } + + case axis_following_sibling: + { + for (xml_node_struct* c = n->next_sibling; c; c = c->next_sibling) + if (step_push(ns, c, alloc) & once) + return; + + break; + } + + case axis_preceding_sibling: + { + for (xml_node_struct* c = n->prev_sibling_c; c->next_sibling; c = c->prev_sibling_c) + if (step_push(ns, c, alloc) & once) + return; + + break; + } + + case axis_following: + { + xml_node_struct* cur = n; + + // exit from this node so that we don't include descendants + while (!cur->next_sibling) + { + cur = cur->parent; + + if (!cur) return; + } + + cur = cur->next_sibling; + + while (cur) + { + if (step_push(ns, cur, alloc) & once) + return; + + if (cur->first_child) + cur = cur->first_child; + else + { + while (!cur->next_sibling) + { + cur = cur->parent; + + if (!cur) return; + } + + cur = cur->next_sibling; + } + } + + break; + } + + case axis_preceding: + { + xml_node_struct* cur = n; + + // exit from this node so that we don't include descendants + while 
(!cur->prev_sibling_c->next_sibling) + { + cur = cur->parent; + + if (!cur) return; + } + + cur = cur->prev_sibling_c; + + while (cur) + { + if (cur->first_child) + cur = cur->first_child->prev_sibling_c; + else + { + // leaf node, can't be ancestor + if (step_push(ns, cur, alloc) & once) + return; + + while (!cur->prev_sibling_c->next_sibling) + { + cur = cur->parent; + + if (!cur) return; + + if (!node_is_ancestor(cur, n)) + if (step_push(ns, cur, alloc) & once) + return; + } + + cur = cur->prev_sibling_c; + } + } + + break; + } + + case axis_ancestor: + case axis_ancestor_or_self: + { + if (axis == axis_ancestor_or_self) + if (step_push(ns, n, alloc) & once) + return; + + xml_node_struct* cur = n->parent; + + while (cur) + { + if (step_push(ns, cur, alloc) & once) + return; + + cur = cur->parent; + } + + break; + } + + case axis_self: + { + step_push(ns, n, alloc); + + break; + } + + case axis_parent: + { + if (n->parent) + step_push(ns, n->parent, alloc); + + break; + } + + default: + assert(false && "Unimplemented axis"); // unreachable + } + } + + template void step_fill(xpath_node_set_raw& ns, xml_attribute_struct* a, xml_node_struct* p, xpath_allocator* alloc, bool once, T v) + { + const axis_t axis = T::axis; + + switch (axis) + { + case axis_ancestor: + case axis_ancestor_or_self: + { + if (axis == axis_ancestor_or_self && _test == nodetest_type_node) // reject attributes based on principal node type test + if (step_push(ns, a, p, alloc) & once) + return; + + xml_node_struct* cur = p; + + while (cur) + { + if (step_push(ns, cur, alloc) & once) + return; + + cur = cur->parent; + } + + break; + } + + case axis_descendant_or_self: + case axis_self: + { + if (_test == nodetest_type_node) // reject attributes based on principal node type test + step_push(ns, a, p, alloc); + + break; + } + + case axis_following: + { + xml_node_struct* cur = p; + + while (cur) + { + if (cur->first_child) + cur = cur->first_child; + else + { + while (!cur->next_sibling) + { + cur 
= cur->parent; + + if (!cur) return; + } + + cur = cur->next_sibling; + } + + if (step_push(ns, cur, alloc) & once) + return; + } + + break; + } + + case axis_parent: + { + step_push(ns, p, alloc); + + break; + } + + case axis_preceding: + { + // preceding:: axis does not include attribute nodes and attribute ancestors (they are the same as parent's ancestors), so we can reuse node preceding + step_fill(ns, p, alloc, once, v); + break; + } + + default: + assert(false && "Unimplemented axis"); // unreachable + } + } + + template void step_fill(xpath_node_set_raw& ns, const xpath_node& xn, xpath_allocator* alloc, bool once, T v) + { + const axis_t axis = T::axis; + const bool axis_has_attributes = (axis == axis_ancestor || axis == axis_ancestor_or_self || axis == axis_descendant_or_self || axis == axis_following || axis == axis_parent || axis == axis_preceding || axis == axis_self); + + if (xn.node()) + step_fill(ns, xn.node().internal_object(), alloc, once, v); + else if (axis_has_attributes && xn.attribute() && xn.parent()) + step_fill(ns, xn.attribute().internal_object(), xn.parent().internal_object(), alloc, once, v); + } + + template xpath_node_set_raw step_do(const xpath_context& c, const xpath_stack& stack, nodeset_eval_t eval, T v) + { + const axis_t axis = T::axis; + const bool axis_reverse = (axis == axis_ancestor || axis == axis_ancestor_or_self || axis == axis_preceding || axis == axis_preceding_sibling); + const xpath_node_set::type_t axis_type = axis_reverse ? 
xpath_node_set::type_sorted_reverse : xpath_node_set::type_sorted; + + bool once = + (axis == axis_attribute && _test == nodetest_name) || + (!_right && eval_once(axis_type, eval)) || + // coverity[mixed_enums] + (_right && !_right->_next && _right->_test == predicate_constant_one); + + xpath_node_set_raw ns; + ns.set_type(axis_type); + + if (_left) + { + xpath_node_set_raw s = _left->eval_node_set(c, stack, nodeset_eval_all); + + // self axis preserves the original order + if (axis == axis_self) ns.set_type(s.type()); + + for (const xpath_node* it = s.begin(); it != s.end(); ++it) + { + size_t size = ns.size(); + + // in general, all axes generate elements in a particular order, but there is no order guarantee if axis is applied to two nodes + if (axis != axis_self && size != 0) ns.set_type(xpath_node_set::type_unsorted); + + step_fill(ns, *it, stack.result, once, v); + if (_right) apply_predicates(ns, size, stack, eval); + } + } + else + { + step_fill(ns, c.n, stack.result, once, v); + if (_right) apply_predicates(ns, 0, stack, eval); + } + + // child, attribute and self axes always generate unique set of nodes + // for other axis, if the set stayed sorted, it stayed unique because the traversal algorithms do not visit the same node twice + if (axis != axis_child && axis != axis_attribute && axis != axis_self && ns.type() == xpath_node_set::type_unsorted) + ns.remove_duplicates(stack.temp); + + return ns; + } + + public: + xpath_ast_node(ast_type_t type, xpath_value_type rettype_, const char_t* value): + _type(static_cast(type)), _rettype(static_cast(rettype_)), _axis(0), _test(0), _left(0), _right(0), _next(0) + { + assert(type == ast_string_constant); + _data.string = value; + } + + xpath_ast_node(ast_type_t type, xpath_value_type rettype_, double value): + _type(static_cast(type)), _rettype(static_cast(rettype_)), _axis(0), _test(0), _left(0), _right(0), _next(0) + { + assert(type == ast_number_constant); + _data.number = value; + } + + 
xpath_ast_node(ast_type_t type, xpath_value_type rettype_, xpath_variable* value): + _type(static_cast(type)), _rettype(static_cast(rettype_)), _axis(0), _test(0), _left(0), _right(0), _next(0) + { + assert(type == ast_variable); + _data.variable = value; + } + + xpath_ast_node(ast_type_t type, xpath_value_type rettype_, xpath_ast_node* left = 0, xpath_ast_node* right = 0): + _type(static_cast(type)), _rettype(static_cast(rettype_)), _axis(0), _test(0), _left(left), _right(right), _next(0) + { + } + + xpath_ast_node(ast_type_t type, xpath_ast_node* left, axis_t axis, nodetest_t test, const char_t* contents): + _type(static_cast(type)), _rettype(xpath_type_node_set), _axis(static_cast(axis)), _test(static_cast(test)), _left(left), _right(0), _next(0) + { + assert(type == ast_step); + _data.nodetest = contents; + } + + xpath_ast_node(ast_type_t type, xpath_ast_node* left, xpath_ast_node* right, predicate_t test): + _type(static_cast(type)), _rettype(xpath_type_node_set), _axis(0), _test(static_cast(test)), _left(left), _right(right), _next(0) + { + assert(type == ast_filter || type == ast_predicate); + } + + void set_next(xpath_ast_node* value) + { + _next = value; + } + + void set_right(xpath_ast_node* value) + { + _right = value; + } + + bool eval_boolean(const xpath_context& c, const xpath_stack& stack) + { + switch (_type) + { + case ast_op_or: + return _left->eval_boolean(c, stack) || _right->eval_boolean(c, stack); + + case ast_op_and: + return _left->eval_boolean(c, stack) && _right->eval_boolean(c, stack); + + case ast_op_equal: + return compare_eq(_left, _right, c, stack, equal_to()); + + case ast_op_not_equal: + return compare_eq(_left, _right, c, stack, not_equal_to()); + + case ast_op_less: + return compare_rel(_left, _right, c, stack, less()); + + case ast_op_greater: + return compare_rel(_right, _left, c, stack, less()); + + case ast_op_less_or_equal: + return compare_rel(_left, _right, c, stack, less_equal()); + + case ast_op_greater_or_equal: + return 
compare_rel(_right, _left, c, stack, less_equal()); + + case ast_func_starts_with: + { + xpath_allocator_capture cr(stack.result); + + xpath_string lr = _left->eval_string(c, stack); + xpath_string rr = _right->eval_string(c, stack); + + return starts_with(lr.c_str(), rr.c_str()); + } + + case ast_func_contains: + { + xpath_allocator_capture cr(stack.result); + + xpath_string lr = _left->eval_string(c, stack); + xpath_string rr = _right->eval_string(c, stack); + + return find_substring(lr.c_str(), rr.c_str()) != 0; + } + + case ast_func_boolean: + return _left->eval_boolean(c, stack); + + case ast_func_not: + return !_left->eval_boolean(c, stack); + + case ast_func_true: + return true; + + case ast_func_false: + return false; + + case ast_func_lang: + { + if (c.n.attribute()) return false; + + xpath_allocator_capture cr(stack.result); + + xpath_string lang = _left->eval_string(c, stack); + + for (xml_node n = c.n.node(); n; n = n.parent()) + { + xml_attribute a = n.attribute(PUGIXML_TEXT("xml:lang")); + + if (a) + { + const char_t* value = a.value(); + + // strnicmp / strncasecmp is not portable + for (const char_t* lit = lang.c_str(); *lit; ++lit) + { + if (tolower_ascii(*lit) != tolower_ascii(*value)) return false; + ++value; + } + + return *value == 0 || *value == '-'; + } + } + + return false; + } + + case ast_opt_compare_attribute: + { + const char_t* value = (_right->_type == ast_string_constant) ? 
_right->_data.string : _right->_data.variable->get_string(); + + xml_attribute attr = c.n.node().attribute(_left->_data.nodetest); + + return attr && strequal(attr.value(), value) && is_xpath_attribute(attr.name()); + } + + case ast_variable: + { + assert(_rettype == _data.variable->type()); + + if (_rettype == xpath_type_boolean) + return _data.variable->get_boolean(); + + // variable needs to be converted to the correct type, this is handled by the fallthrough block below + break; + } + + default: + ; + } + + // none of the ast types that return the value directly matched, we need to perform type conversion + switch (_rettype) + { + case xpath_type_number: + return convert_number_to_boolean(eval_number(c, stack)); + + case xpath_type_string: + { + xpath_allocator_capture cr(stack.result); + + return !eval_string(c, stack).empty(); + } + + case xpath_type_node_set: + { + xpath_allocator_capture cr(stack.result); + + return !eval_node_set(c, stack, nodeset_eval_any).empty(); + } + + default: + assert(false && "Wrong expression for return type boolean"); // unreachable + return false; + } + } + + double eval_number(const xpath_context& c, const xpath_stack& stack) + { + switch (_type) + { + case ast_op_add: + return _left->eval_number(c, stack) + _right->eval_number(c, stack); + + case ast_op_subtract: + return _left->eval_number(c, stack) - _right->eval_number(c, stack); + + case ast_op_multiply: + return _left->eval_number(c, stack) * _right->eval_number(c, stack); + + case ast_op_divide: + return _left->eval_number(c, stack) / _right->eval_number(c, stack); + + case ast_op_mod: + return fmod(_left->eval_number(c, stack), _right->eval_number(c, stack)); + + case ast_op_negate: + return -_left->eval_number(c, stack); + + case ast_number_constant: + return _data.number; + + case ast_func_last: + return static_cast(c.size); + + case ast_func_position: + return static_cast(c.position); + + case ast_func_count: + { + xpath_allocator_capture cr(stack.result); + + return 
static_cast(_left->eval_node_set(c, stack, nodeset_eval_all).size()); + } + + case ast_func_string_length_0: + { + xpath_allocator_capture cr(stack.result); + + return static_cast(string_value(c.n, stack.result).length()); + } + + case ast_func_string_length_1: + { + xpath_allocator_capture cr(stack.result); + + return static_cast(_left->eval_string(c, stack).length()); + } + + case ast_func_number_0: + { + xpath_allocator_capture cr(stack.result); + + return convert_string_to_number(string_value(c.n, stack.result).c_str()); + } + + case ast_func_number_1: + return _left->eval_number(c, stack); + + case ast_func_sum: + { + xpath_allocator_capture cr(stack.result); + + double r = 0; + + xpath_node_set_raw ns = _left->eval_node_set(c, stack, nodeset_eval_all); + + for (const xpath_node* it = ns.begin(); it != ns.end(); ++it) + { + xpath_allocator_capture cri(stack.result); + + r += convert_string_to_number(string_value(*it, stack.result).c_str()); + } + + return r; + } + + case ast_func_floor: + { + double r = _left->eval_number(c, stack); + + return r == r ? floor(r) : r; + } + + case ast_func_ceiling: + { + double r = _left->eval_number(c, stack); + + return r == r ? ceil(r) : r; + } + + case ast_func_round: + return round_nearest_nzero(_left->eval_number(c, stack)); + + case ast_variable: + { + assert(_rettype == _data.variable->type()); + + if (_rettype == xpath_type_number) + return _data.variable->get_number(); + + // variable needs to be converted to the correct type, this is handled by the fallthrough block below + break; + } + + default: + ; + } + + // none of the ast types that return the value directly matched, we need to perform type conversion + switch (_rettype) + { + case xpath_type_boolean: + return eval_boolean(c, stack) ? 
1 : 0; + + case xpath_type_string: + { + xpath_allocator_capture cr(stack.result); + + return convert_string_to_number(eval_string(c, stack).c_str()); + } + + case xpath_type_node_set: + { + xpath_allocator_capture cr(stack.result); + + return convert_string_to_number(eval_string(c, stack).c_str()); + } + + default: + assert(false && "Wrong expression for return type number"); // unreachable + return 0; + } + } + + xpath_string eval_string_concat(const xpath_context& c, const xpath_stack& stack) + { + assert(_type == ast_func_concat); + + xpath_allocator_capture ct(stack.temp); + + // count the string number + size_t count = 1; + for (xpath_ast_node* nc = _right; nc; nc = nc->_next) count++; + + // allocate a buffer for temporary string objects + xpath_string* buffer = static_cast(stack.temp->allocate(count * sizeof(xpath_string))); + if (!buffer) return xpath_string(); + + // evaluate all strings to temporary stack + xpath_stack swapped_stack = {stack.temp, stack.result}; + + buffer[0] = _left->eval_string(c, swapped_stack); + + size_t pos = 1; + for (xpath_ast_node* n = _right; n; n = n->_next, ++pos) buffer[pos] = n->eval_string(c, swapped_stack); + assert(pos == count); + + // get total length + size_t length = 0; + for (size_t i = 0; i < count; ++i) length += buffer[i].length(); + + // create final string + char_t* result = static_cast(stack.result->allocate((length + 1) * sizeof(char_t))); + if (!result) return xpath_string(); + + char_t* ri = result; + + for (size_t j = 0; j < count; ++j) + for (const char_t* bi = buffer[j].c_str(); *bi; ++bi) + *ri++ = *bi; + + *ri = 0; + + return xpath_string::from_heap_preallocated(result, ri); + } + + xpath_string eval_string(const xpath_context& c, const xpath_stack& stack) + { + switch (_type) + { + case ast_string_constant: + return xpath_string::from_const(_data.string); + + case ast_func_local_name_0: + { + xpath_node na = c.n; + + return xpath_string::from_const(local_name(na)); + } + + case ast_func_local_name_1: 
+ { + xpath_allocator_capture cr(stack.result); + + xpath_node_set_raw ns = _left->eval_node_set(c, stack, nodeset_eval_first); + xpath_node na = ns.first(); + + return xpath_string::from_const(local_name(na)); + } + + case ast_func_name_0: + { + xpath_node na = c.n; + + return xpath_string::from_const(qualified_name(na)); + } + + case ast_func_name_1: + { + xpath_allocator_capture cr(stack.result); + + xpath_node_set_raw ns = _left->eval_node_set(c, stack, nodeset_eval_first); + xpath_node na = ns.first(); + + return xpath_string::from_const(qualified_name(na)); + } + + case ast_func_namespace_uri_0: + { + xpath_node na = c.n; + + return xpath_string::from_const(namespace_uri(na)); + } + + case ast_func_namespace_uri_1: + { + xpath_allocator_capture cr(stack.result); + + xpath_node_set_raw ns = _left->eval_node_set(c, stack, nodeset_eval_first); + xpath_node na = ns.first(); + + return xpath_string::from_const(namespace_uri(na)); + } + + case ast_func_string_0: + return string_value(c.n, stack.result); + + case ast_func_string_1: + return _left->eval_string(c, stack); + + case ast_func_concat: + return eval_string_concat(c, stack); + + case ast_func_substring_before: + { + xpath_allocator_capture cr(stack.temp); + + xpath_stack swapped_stack = {stack.temp, stack.result}; + + xpath_string s = _left->eval_string(c, swapped_stack); + xpath_string p = _right->eval_string(c, swapped_stack); + + const char_t* pos = find_substring(s.c_str(), p.c_str()); + + return pos ? 
xpath_string::from_heap(s.c_str(), pos, stack.result) : xpath_string(); + } + + case ast_func_substring_after: + { + xpath_allocator_capture cr(stack.temp); + + xpath_stack swapped_stack = {stack.temp, stack.result}; + + xpath_string s = _left->eval_string(c, swapped_stack); + xpath_string p = _right->eval_string(c, swapped_stack); + + const char_t* pos = find_substring(s.c_str(), p.c_str()); + if (!pos) return xpath_string(); + + const char_t* rbegin = pos + p.length(); + const char_t* rend = s.c_str() + s.length(); + + return s.uses_heap() ? xpath_string::from_heap(rbegin, rend, stack.result) : xpath_string::from_const(rbegin); + } + + case ast_func_substring_2: + { + xpath_allocator_capture cr(stack.temp); + + xpath_stack swapped_stack = {stack.temp, stack.result}; + + xpath_string s = _left->eval_string(c, swapped_stack); + size_t s_length = s.length(); + + double first = round_nearest(_right->eval_number(c, stack)); + + if (is_nan(first)) return xpath_string(); // NaN + else if (first >= static_cast(s_length + 1)) return xpath_string(); + + size_t pos = first < 1 ? 1 : static_cast(first); + assert(1 <= pos && pos <= s_length + 1); + + const char_t* rbegin = s.c_str() + (pos - 1); + const char_t* rend = s.c_str() + s.length(); + + return s.uses_heap() ? xpath_string::from_heap(rbegin, rend, stack.result) : xpath_string::from_const(rbegin); + } + + case ast_func_substring_3: + { + xpath_allocator_capture cr(stack.temp); + + xpath_stack swapped_stack = {stack.temp, stack.result}; + + xpath_string s = _left->eval_string(c, swapped_stack); + size_t s_length = s.length(); + + double first = round_nearest(_right->eval_number(c, stack)); + double last = first + round_nearest(_right->_next->eval_number(c, stack)); + + if (is_nan(first) || is_nan(last)) return xpath_string(); + else if (first >= static_cast(s_length + 1)) return xpath_string(); + else if (first >= last) return xpath_string(); + else if (last < 1) return xpath_string(); + + size_t pos = first < 1 ? 
1 : static_cast(first); + size_t end = last >= static_cast(s_length + 1) ? s_length + 1 : static_cast(last); + + assert(1 <= pos && pos <= end && end <= s_length + 1); + const char_t* rbegin = s.c_str() + (pos - 1); + const char_t* rend = s.c_str() + (end - 1); + + return (end == s_length + 1 && !s.uses_heap()) ? xpath_string::from_const(rbegin) : xpath_string::from_heap(rbegin, rend, stack.result); + } + + case ast_func_normalize_space_0: + { + xpath_string s = string_value(c.n, stack.result); + + char_t* begin = s.data(stack.result); + if (!begin) return xpath_string(); + + char_t* end = normalize_space(begin); + + return xpath_string::from_heap_preallocated(begin, end); + } + + case ast_func_normalize_space_1: + { + xpath_string s = _left->eval_string(c, stack); + + char_t* begin = s.data(stack.result); + if (!begin) return xpath_string(); + + char_t* end = normalize_space(begin); + + return xpath_string::from_heap_preallocated(begin, end); + } + + case ast_func_translate: + { + xpath_allocator_capture cr(stack.temp); + + xpath_stack swapped_stack = {stack.temp, stack.result}; + + xpath_string s = _left->eval_string(c, stack); + xpath_string from = _right->eval_string(c, swapped_stack); + xpath_string to = _right->_next->eval_string(c, swapped_stack); + + char_t* begin = s.data(stack.result); + if (!begin) return xpath_string(); + + char_t* end = translate(begin, from.c_str(), to.c_str(), to.length()); + + return xpath_string::from_heap_preallocated(begin, end); + } + + case ast_opt_translate_table: + { + xpath_string s = _left->eval_string(c, stack); + + char_t* begin = s.data(stack.result); + if (!begin) return xpath_string(); + + char_t* end = translate_table(begin, _data.table); + + return xpath_string::from_heap_preallocated(begin, end); + } + + case ast_variable: + { + assert(_rettype == _data.variable->type()); + + if (_rettype == xpath_type_string) + return xpath_string::from_const(_data.variable->get_string()); + + // variable needs to be converted to 
the correct type, this is handled by the fallthrough block below + break; + } + + default: + ; + } + + // none of the ast types that return the value directly matched, we need to perform type conversion + switch (_rettype) + { + case xpath_type_boolean: + return xpath_string::from_const(eval_boolean(c, stack) ? PUGIXML_TEXT("true") : PUGIXML_TEXT("false")); + + case xpath_type_number: + return convert_number_to_string(eval_number(c, stack), stack.result); + + case xpath_type_node_set: + { + xpath_allocator_capture cr(stack.temp); + + xpath_stack swapped_stack = {stack.temp, stack.result}; + + xpath_node_set_raw ns = eval_node_set(c, swapped_stack, nodeset_eval_first); + return ns.empty() ? xpath_string() : string_value(ns.first(), stack.result); + } + + default: + assert(false && "Wrong expression for return type string"); // unreachable + return xpath_string(); + } + } + + xpath_node_set_raw eval_node_set(const xpath_context& c, const xpath_stack& stack, nodeset_eval_t eval) + { + switch (_type) + { + case ast_op_union: + { + xpath_allocator_capture cr(stack.temp); + + xpath_stack swapped_stack = {stack.temp, stack.result}; + + xpath_node_set_raw ls = _left->eval_node_set(c, stack, eval); + xpath_node_set_raw rs = _right->eval_node_set(c, swapped_stack, eval); + + // we can optimize merging two sorted sets, but this is a very rare operation, so don't bother + ls.set_type(xpath_node_set::type_unsorted); + + ls.append(rs.begin(), rs.end(), stack.result); + ls.remove_duplicates(stack.temp); + + return ls; + } + + case ast_filter: + { + xpath_node_set_raw set = _left->eval_node_set(c, stack, _test == predicate_constant_one ? 
nodeset_eval_first : nodeset_eval_all); + + // either expression is a number or it contains position() call; sort by document order + if (_test != predicate_posinv) set.sort_do(); + + bool once = eval_once(set.type(), eval); + + apply_predicate(set, 0, stack, once); + + return set; + } + + case ast_func_id: + return xpath_node_set_raw(); + + case ast_step: + { + switch (_axis) + { + case axis_ancestor: + return step_do(c, stack, eval, axis_to_type()); + + case axis_ancestor_or_self: + return step_do(c, stack, eval, axis_to_type()); + + case axis_attribute: + return step_do(c, stack, eval, axis_to_type()); + + case axis_child: + return step_do(c, stack, eval, axis_to_type()); + + case axis_descendant: + return step_do(c, stack, eval, axis_to_type()); + + case axis_descendant_or_self: + return step_do(c, stack, eval, axis_to_type()); + + case axis_following: + return step_do(c, stack, eval, axis_to_type()); + + case axis_following_sibling: + return step_do(c, stack, eval, axis_to_type()); + + case axis_namespace: + // namespaced axis is not supported + return xpath_node_set_raw(); + + case axis_parent: + return step_do(c, stack, eval, axis_to_type()); + + case axis_preceding: + return step_do(c, stack, eval, axis_to_type()); + + case axis_preceding_sibling: + return step_do(c, stack, eval, axis_to_type()); + + case axis_self: + return step_do(c, stack, eval, axis_to_type()); + + default: + assert(false && "Unknown axis"); // unreachable + return xpath_node_set_raw(); + } + } + + case ast_step_root: + { + assert(!_right); // root step can't have any predicates + + xpath_node_set_raw ns; + + ns.set_type(xpath_node_set::type_sorted); + + if (c.n.node()) ns.push_back(c.n.node().root(), stack.result); + else if (c.n.attribute()) ns.push_back(c.n.parent().root(), stack.result); + + return ns; + } + + case ast_variable: + { + assert(_rettype == _data.variable->type()); + + if (_rettype == xpath_type_node_set) + { + const xpath_node_set& s = _data.variable->get_node_set(); + 
+ xpath_node_set_raw ns; + + ns.set_type(s.type()); + ns.append(s.begin(), s.end(), stack.result); + + return ns; + } + + // variable needs to be converted to the correct type, this is handled by the fallthrough block below + break; + } + + default: + ; + } + + // none of the ast types that return the value directly matched, but conversions to node set are invalid + assert(false && "Wrong expression for return type node set"); // unreachable + return xpath_node_set_raw(); + } + + void optimize(xpath_allocator* alloc) + { + if (_left) + _left->optimize(alloc); + + if (_right) + _right->optimize(alloc); + + if (_next) + _next->optimize(alloc); + + // coverity[var_deref_model] + optimize_self(alloc); + } + + void optimize_self(xpath_allocator* alloc) + { + // Rewrite [position()=expr] with [expr] + // Note that this step has to go before classification to recognize [position()=1] + if ((_type == ast_filter || _type == ast_predicate) && + _right && // workaround for clang static analyzer (_right is never null for ast_filter/ast_predicate) + _right->_type == ast_op_equal && _right->_left->_type == ast_func_position && _right->_right->_rettype == xpath_type_number) + { + _right = _right->_right; + } + + // Classify filter/predicate ops to perform various optimizations during evaluation + if ((_type == ast_filter || _type == ast_predicate) && _right) // workaround for clang static analyzer (_right is never null for ast_filter/ast_predicate) + { + assert(_test == predicate_default); + + if (_right->_type == ast_number_constant && _right->_data.number == 1.0) + _test = predicate_constant_one; + else if (_right->_rettype == xpath_type_number && (_right->_type == ast_number_constant || _right->_type == ast_variable || _right->_type == ast_func_last)) + _test = predicate_constant; + else if (_right->_rettype != xpath_type_number && _right->is_posinv_expr()) + _test = predicate_posinv; + } + + // Rewrite descendant-or-self::node()/child::foo with descendant::foo + // The former 
is a full form of //foo, the latter is much faster since it executes the node test immediately + // Do a similar kind of rewrite for self/descendant/descendant-or-self axes + // Note that we only rewrite positionally invariant steps (//foo[1] != /descendant::foo[1]) + if (_type == ast_step && (_axis == axis_child || _axis == axis_self || _axis == axis_descendant || _axis == axis_descendant_or_self) && + _left && _left->_type == ast_step && _left->_axis == axis_descendant_or_self && _left->_test == nodetest_type_node && !_left->_right && + is_posinv_step()) + { + if (_axis == axis_child || _axis == axis_descendant) + _axis = axis_descendant; + else + _axis = axis_descendant_or_self; + + _left = _left->_left; + } + + // Use optimized lookup table implementation for translate() with constant arguments + if (_type == ast_func_translate && + _right && // workaround for clang static analyzer (_right is never null for ast_func_translate) + _right->_type == ast_string_constant && _right->_next->_type == ast_string_constant) + { + unsigned char* table = translate_table_generate(alloc, _right->_data.string, _right->_next->_data.string); + + if (table) + { + _type = ast_opt_translate_table; + _data.table = table; + } + } + + // Use optimized path for @attr = 'value' or @attr = $value + if (_type == ast_op_equal && + _left && _right && // workaround for clang static analyzer and Coverity (_left and _right are never null for ast_op_equal) + // coverity[mixed_enums] + _left->_type == ast_step && _left->_axis == axis_attribute && _left->_test == nodetest_name && !_left->_left && !_left->_right && + (_right->_type == ast_string_constant || (_right->_type == ast_variable && _right->_rettype == xpath_type_string))) + { + _type = ast_opt_compare_attribute; + } + } + + bool is_posinv_expr() const + { + switch (_type) + { + case ast_func_position: + case ast_func_last: + return false; + + case ast_string_constant: + case ast_number_constant: + case ast_variable: + return true; + + case 
ast_step: + case ast_step_root: + return true; + + case ast_predicate: + case ast_filter: + return true; + + default: + if (_left && !_left->is_posinv_expr()) return false; + + for (xpath_ast_node* n = _right; n; n = n->_next) + if (!n->is_posinv_expr()) return false; + + return true; + } + } + + bool is_posinv_step() const + { + assert(_type == ast_step); + + for (xpath_ast_node* n = _right; n; n = n->_next) + { + assert(n->_type == ast_predicate); + + if (n->_test != predicate_posinv) + return false; + } + + return true; + } + + xpath_value_type rettype() const + { + return static_cast(_rettype); + } + }; + + static const size_t xpath_ast_depth_limit = + #ifdef PUGIXML_XPATH_DEPTH_LIMIT + PUGIXML_XPATH_DEPTH_LIMIT + #else + 1024 + #endif + ; + + struct xpath_parser + { + xpath_allocator* _alloc; + xpath_lexer _lexer; + + const char_t* _query; + xpath_variable_set* _variables; + + xpath_parse_result* _result; + + char_t _scratch[32]; + + size_t _depth; + + xpath_ast_node* error(const char* message) + { + _result->error = message; + _result->offset = _lexer.current_pos() - _query; + + return 0; + } + + xpath_ast_node* error_oom() + { + assert(_alloc->_error); + *_alloc->_error = true; + + return 0; + } + + xpath_ast_node* error_rec() + { + return error("Exceeded maximum allowed query depth"); + } + + void* alloc_node() + { + return _alloc->allocate(sizeof(xpath_ast_node)); + } + + xpath_ast_node* alloc_node(ast_type_t type, xpath_value_type rettype, const char_t* value) + { + void* memory = alloc_node(); + return memory ? new (memory) xpath_ast_node(type, rettype, value) : 0; + } + + xpath_ast_node* alloc_node(ast_type_t type, xpath_value_type rettype, double value) + { + void* memory = alloc_node(); + return memory ? new (memory) xpath_ast_node(type, rettype, value) : 0; + } + + xpath_ast_node* alloc_node(ast_type_t type, xpath_value_type rettype, xpath_variable* value) + { + void* memory = alloc_node(); + return memory ? 
new (memory) xpath_ast_node(type, rettype, value) : 0; + } + + xpath_ast_node* alloc_node(ast_type_t type, xpath_value_type rettype, xpath_ast_node* left = 0, xpath_ast_node* right = 0) + { + void* memory = alloc_node(); + return memory ? new (memory) xpath_ast_node(type, rettype, left, right) : 0; + } + + xpath_ast_node* alloc_node(ast_type_t type, xpath_ast_node* left, axis_t axis, nodetest_t test, const char_t* contents) + { + void* memory = alloc_node(); + return memory ? new (memory) xpath_ast_node(type, left, axis, test, contents) : 0; + } + + xpath_ast_node* alloc_node(ast_type_t type, xpath_ast_node* left, xpath_ast_node* right, predicate_t test) + { + void* memory = alloc_node(); + return memory ? new (memory) xpath_ast_node(type, left, right, test) : 0; + } + + const char_t* alloc_string(const xpath_lexer_string& value) + { + if (!value.begin) + return PUGIXML_TEXT(""); + + size_t length = static_cast(value.end - value.begin); + + char_t* c = static_cast(_alloc->allocate((length + 1) * sizeof(char_t))); + if (!c) return 0; + + memcpy(c, value.begin, length * sizeof(char_t)); + c[length] = 0; + + return c; + } + + xpath_ast_node* parse_function(const xpath_lexer_string& name, size_t argc, xpath_ast_node* args[2]) + { + switch (name.begin[0]) + { + case 'b': + if (name == PUGIXML_TEXT("boolean") && argc == 1) + return alloc_node(ast_func_boolean, xpath_type_boolean, args[0]); + + break; + + case 'c': + if (name == PUGIXML_TEXT("count") && argc == 1) + { + if (args[0]->rettype() != xpath_type_node_set) return error("Function has to be applied to node set"); + return alloc_node(ast_func_count, xpath_type_number, args[0]); + } + else if (name == PUGIXML_TEXT("contains") && argc == 2) + return alloc_node(ast_func_contains, xpath_type_boolean, args[0], args[1]); + else if (name == PUGIXML_TEXT("concat") && argc >= 2) + return alloc_node(ast_func_concat, xpath_type_string, args[0], args[1]); + else if (name == PUGIXML_TEXT("ceiling") && argc == 1) + return 
alloc_node(ast_func_ceiling, xpath_type_number, args[0]); + + break; + + case 'f': + if (name == PUGIXML_TEXT("false") && argc == 0) + return alloc_node(ast_func_false, xpath_type_boolean); + else if (name == PUGIXML_TEXT("floor") && argc == 1) + return alloc_node(ast_func_floor, xpath_type_number, args[0]); + + break; + + case 'i': + if (name == PUGIXML_TEXT("id") && argc == 1) + return alloc_node(ast_func_id, xpath_type_node_set, args[0]); + + break; + + case 'l': + if (name == PUGIXML_TEXT("last") && argc == 0) + return alloc_node(ast_func_last, xpath_type_number); + else if (name == PUGIXML_TEXT("lang") && argc == 1) + return alloc_node(ast_func_lang, xpath_type_boolean, args[0]); + else if (name == PUGIXML_TEXT("local-name") && argc <= 1) + { + if (argc == 1 && args[0]->rettype() != xpath_type_node_set) return error("Function has to be applied to node set"); + return alloc_node(argc == 0 ? ast_func_local_name_0 : ast_func_local_name_1, xpath_type_string, args[0]); + } + + break; + + case 'n': + if (name == PUGIXML_TEXT("name") && argc <= 1) + { + if (argc == 1 && args[0]->rettype() != xpath_type_node_set) return error("Function has to be applied to node set"); + return alloc_node(argc == 0 ? ast_func_name_0 : ast_func_name_1, xpath_type_string, args[0]); + } + else if (name == PUGIXML_TEXT("namespace-uri") && argc <= 1) + { + if (argc == 1 && args[0]->rettype() != xpath_type_node_set) return error("Function has to be applied to node set"); + return alloc_node(argc == 0 ? ast_func_namespace_uri_0 : ast_func_namespace_uri_1, xpath_type_string, args[0]); + } + else if (name == PUGIXML_TEXT("normalize-space") && argc <= 1) + return alloc_node(argc == 0 ? ast_func_normalize_space_0 : ast_func_normalize_space_1, xpath_type_string, args[0], args[1]); + else if (name == PUGIXML_TEXT("not") && argc == 1) + return alloc_node(ast_func_not, xpath_type_boolean, args[0]); + else if (name == PUGIXML_TEXT("number") && argc <= 1) + return alloc_node(argc == 0 ? 
ast_func_number_0 : ast_func_number_1, xpath_type_number, args[0]); + + break; + + case 'p': + if (name == PUGIXML_TEXT("position") && argc == 0) + return alloc_node(ast_func_position, xpath_type_number); + + break; + + case 'r': + if (name == PUGIXML_TEXT("round") && argc == 1) + return alloc_node(ast_func_round, xpath_type_number, args[0]); + + break; + + case 's': + if (name == PUGIXML_TEXT("string") && argc <= 1) + return alloc_node(argc == 0 ? ast_func_string_0 : ast_func_string_1, xpath_type_string, args[0]); + else if (name == PUGIXML_TEXT("string-length") && argc <= 1) + return alloc_node(argc == 0 ? ast_func_string_length_0 : ast_func_string_length_1, xpath_type_number, args[0]); + else if (name == PUGIXML_TEXT("starts-with") && argc == 2) + return alloc_node(ast_func_starts_with, xpath_type_boolean, args[0], args[1]); + else if (name == PUGIXML_TEXT("substring-before") && argc == 2) + return alloc_node(ast_func_substring_before, xpath_type_string, args[0], args[1]); + else if (name == PUGIXML_TEXT("substring-after") && argc == 2) + return alloc_node(ast_func_substring_after, xpath_type_string, args[0], args[1]); + else if (name == PUGIXML_TEXT("substring") && (argc == 2 || argc == 3)) + return alloc_node(argc == 2 ? 
ast_func_substring_2 : ast_func_substring_3, xpath_type_string, args[0], args[1]); + else if (name == PUGIXML_TEXT("sum") && argc == 1) + { + if (args[0]->rettype() != xpath_type_node_set) return error("Function has to be applied to node set"); + return alloc_node(ast_func_sum, xpath_type_number, args[0]); + } + + break; + + case 't': + if (name == PUGIXML_TEXT("translate") && argc == 3) + return alloc_node(ast_func_translate, xpath_type_string, args[0], args[1]); + else if (name == PUGIXML_TEXT("true") && argc == 0) + return alloc_node(ast_func_true, xpath_type_boolean); + + break; + + default: + break; + } + + return error("Unrecognized function or wrong parameter count"); + } + + axis_t parse_axis_name(const xpath_lexer_string& name, bool& specified) + { + specified = true; + + switch (name.begin[0]) + { + case 'a': + if (name == PUGIXML_TEXT("ancestor")) + return axis_ancestor; + else if (name == PUGIXML_TEXT("ancestor-or-self")) + return axis_ancestor_or_self; + else if (name == PUGIXML_TEXT("attribute")) + return axis_attribute; + + break; + + case 'c': + if (name == PUGIXML_TEXT("child")) + return axis_child; + + break; + + case 'd': + if (name == PUGIXML_TEXT("descendant")) + return axis_descendant; + else if (name == PUGIXML_TEXT("descendant-or-self")) + return axis_descendant_or_self; + + break; + + case 'f': + if (name == PUGIXML_TEXT("following")) + return axis_following; + else if (name == PUGIXML_TEXT("following-sibling")) + return axis_following_sibling; + + break; + + case 'n': + if (name == PUGIXML_TEXT("namespace")) + return axis_namespace; + + break; + + case 'p': + if (name == PUGIXML_TEXT("parent")) + return axis_parent; + else if (name == PUGIXML_TEXT("preceding")) + return axis_preceding; + else if (name == PUGIXML_TEXT("preceding-sibling")) + return axis_preceding_sibling; + + break; + + case 's': + if (name == PUGIXML_TEXT("self")) + return axis_self; + + break; + + default: + break; + } + + specified = false; + return axis_child; + } + + 
nodetest_t parse_node_test_type(const xpath_lexer_string& name) + { + switch (name.begin[0]) + { + case 'c': + if (name == PUGIXML_TEXT("comment")) + return nodetest_type_comment; + + break; + + case 'n': + if (name == PUGIXML_TEXT("node")) + return nodetest_type_node; + + break; + + case 'p': + if (name == PUGIXML_TEXT("processing-instruction")) + return nodetest_type_pi; + + break; + + case 't': + if (name == PUGIXML_TEXT("text")) + return nodetest_type_text; + + break; + + default: + break; + } + + return nodetest_none; + } + + // PrimaryExpr ::= VariableReference | '(' Expr ')' | Literal | Number | FunctionCall + xpath_ast_node* parse_primary_expression() + { + switch (_lexer.current()) + { + case lex_var_ref: + { + xpath_lexer_string name = _lexer.contents(); + + if (!_variables) + return error("Unknown variable: variable set is not provided"); + + xpath_variable* var = 0; + if (!get_variable_scratch(_scratch, _variables, name.begin, name.end, &var)) + return error_oom(); + + if (!var) + return error("Unknown variable: variable set does not contain the given name"); + + _lexer.next(); + + return alloc_node(ast_variable, var->type(), var); + } + + case lex_open_brace: + { + _lexer.next(); + + xpath_ast_node* n = parse_expression(); + if (!n) return 0; + + if (_lexer.current() != lex_close_brace) + return error("Expected ')' to match an opening '('"); + + _lexer.next(); + + return n; + } + + case lex_quoted_string: + { + const char_t* value = alloc_string(_lexer.contents()); + if (!value) return 0; + + _lexer.next(); + + return alloc_node(ast_string_constant, xpath_type_string, value); + } + + case lex_number: + { + double value = 0; + + if (!convert_string_to_number_scratch(_scratch, _lexer.contents().begin, _lexer.contents().end, &value)) + return error_oom(); + + _lexer.next(); + + return alloc_node(ast_number_constant, xpath_type_number, value); + } + + case lex_string: + { + xpath_ast_node* args[2] = {0}; + size_t argc = 0; + + xpath_lexer_string function = 
_lexer.contents(); + _lexer.next(); + + xpath_ast_node* last_arg = 0; + + if (_lexer.current() != lex_open_brace) + return error("Unrecognized function call"); + _lexer.next(); + + size_t old_depth = _depth; + + while (_lexer.current() != lex_close_brace) + { + if (argc > 0) + { + if (_lexer.current() != lex_comma) + return error("No comma between function arguments"); + _lexer.next(); + } + + if (++_depth > xpath_ast_depth_limit) + return error_rec(); + + xpath_ast_node* n = parse_expression(); + if (!n) return 0; + + if (argc < 2) args[argc] = n; + else last_arg->set_next(n); + + argc++; + last_arg = n; + } + + _lexer.next(); + + _depth = old_depth; + + return parse_function(function, argc, args); + } + + default: + return error("Unrecognizable primary expression"); + } + } + + // FilterExpr ::= PrimaryExpr | FilterExpr Predicate + // Predicate ::= '[' PredicateExpr ']' + // PredicateExpr ::= Expr + xpath_ast_node* parse_filter_expression() + { + xpath_ast_node* n = parse_primary_expression(); + if (!n) return 0; + + size_t old_depth = _depth; + + while (_lexer.current() == lex_open_square_brace) + { + _lexer.next(); + + if (++_depth > xpath_ast_depth_limit) + return error_rec(); + + if (n->rettype() != xpath_type_node_set) + return error("Predicate has to be applied to node set"); + + xpath_ast_node* expr = parse_expression(); + if (!expr) return 0; + + n = alloc_node(ast_filter, n, expr, predicate_default); + if (!n) return 0; + + if (_lexer.current() != lex_close_square_brace) + return error("Expected ']' to match an opening '['"); + + _lexer.next(); + } + + _depth = old_depth; + + return n; + } + + // Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep + // AxisSpecifier ::= AxisName '::' | '@'? + // NodeTest ::= NameTest | NodeType '(' ')' | 'processing-instruction' '(' Literal ')' + // NameTest ::= '*' | NCName ':' '*' | QName + // AbbreviatedStep ::= '.' | '..' 
+ xpath_ast_node* parse_step(xpath_ast_node* set) + { + if (set && set->rettype() != xpath_type_node_set) + return error("Step has to be applied to node set"); + + bool axis_specified = false; + axis_t axis = axis_child; // implied child axis + + if (_lexer.current() == lex_axis_attribute) + { + axis = axis_attribute; + axis_specified = true; + + _lexer.next(); + } + else if (_lexer.current() == lex_dot) + { + _lexer.next(); + + if (_lexer.current() == lex_open_square_brace) + return error("Predicates are not allowed after an abbreviated step"); + + return alloc_node(ast_step, set, axis_self, nodetest_type_node, 0); + } + else if (_lexer.current() == lex_double_dot) + { + _lexer.next(); + + if (_lexer.current() == lex_open_square_brace) + return error("Predicates are not allowed after an abbreviated step"); + + return alloc_node(ast_step, set, axis_parent, nodetest_type_node, 0); + } + + nodetest_t nt_type = nodetest_none; + xpath_lexer_string nt_name; + + if (_lexer.current() == lex_string) + { + // node name test + nt_name = _lexer.contents(); + _lexer.next(); + + // was it an axis name? 
+ if (_lexer.current() == lex_double_colon) + { + // parse axis name + if (axis_specified) + return error("Two axis specifiers in one step"); + + axis = parse_axis_name(nt_name, axis_specified); + + if (!axis_specified) + return error("Unknown axis"); + + // read actual node test + _lexer.next(); + + if (_lexer.current() == lex_multiply) + { + nt_type = nodetest_all; + nt_name = xpath_lexer_string(); + _lexer.next(); + } + else if (_lexer.current() == lex_string) + { + nt_name = _lexer.contents(); + _lexer.next(); + } + else + { + return error("Unrecognized node test"); + } + } + + if (nt_type == nodetest_none) + { + // node type test or processing-instruction + if (_lexer.current() == lex_open_brace) + { + _lexer.next(); + + if (_lexer.current() == lex_close_brace) + { + _lexer.next(); + + nt_type = parse_node_test_type(nt_name); + + if (nt_type == nodetest_none) + return error("Unrecognized node type"); + + nt_name = xpath_lexer_string(); + } + else if (nt_name == PUGIXML_TEXT("processing-instruction")) + { + if (_lexer.current() != lex_quoted_string) + return error("Only literals are allowed as arguments to processing-instruction()"); + + nt_type = nodetest_pi; + nt_name = _lexer.contents(); + _lexer.next(); + + if (_lexer.current() != lex_close_brace) + return error("Unmatched brace near processing-instruction()"); + _lexer.next(); + } + else + { + return error("Unmatched brace near node type test"); + } + } + // QName or NCName:* + else + { + if (nt_name.end - nt_name.begin > 2 && nt_name.end[-2] == ':' && nt_name.end[-1] == '*') // NCName:* + { + nt_name.end--; // erase * + + nt_type = nodetest_all_in_namespace; + } + else + { + nt_type = nodetest_name; + } + } + } + } + else if (_lexer.current() == lex_multiply) + { + nt_type = nodetest_all; + _lexer.next(); + } + else + { + return error("Unrecognized node test"); + } + + const char_t* nt_name_copy = alloc_string(nt_name); + if (!nt_name_copy) return 0; + + xpath_ast_node* n = alloc_node(ast_step, set, axis, 
nt_type, nt_name_copy); + if (!n) return 0; + + size_t old_depth = _depth; + + xpath_ast_node* last = 0; + + while (_lexer.current() == lex_open_square_brace) + { + _lexer.next(); + + if (++_depth > xpath_ast_depth_limit) + return error_rec(); + + xpath_ast_node* expr = parse_expression(); + if (!expr) return 0; + + xpath_ast_node* pred = alloc_node(ast_predicate, 0, expr, predicate_default); + if (!pred) return 0; + + if (_lexer.current() != lex_close_square_brace) + return error("Expected ']' to match an opening '['"); + _lexer.next(); + + if (last) last->set_next(pred); + else n->set_right(pred); + + last = pred; + } + + _depth = old_depth; + + return n; + } + + // RelativeLocationPath ::= Step | RelativeLocationPath '/' Step | RelativeLocationPath '//' Step + xpath_ast_node* parse_relative_location_path(xpath_ast_node* set) + { + xpath_ast_node* n = parse_step(set); + if (!n) return 0; + + size_t old_depth = _depth; + + while (_lexer.current() == lex_slash || _lexer.current() == lex_double_slash) + { + lexeme_t l = _lexer.current(); + _lexer.next(); + + if (l == lex_double_slash) + { + n = alloc_node(ast_step, n, axis_descendant_or_self, nodetest_type_node, 0); + if (!n) return 0; + + ++_depth; + } + + if (++_depth > xpath_ast_depth_limit) + return error_rec(); + + n = parse_step(n); + if (!n) return 0; + } + + _depth = old_depth; + + return n; + } + + // LocationPath ::= RelativeLocationPath | AbsoluteLocationPath + // AbsoluteLocationPath ::= '/' RelativeLocationPath? 
| '//' RelativeLocationPath + xpath_ast_node* parse_location_path() + { + if (_lexer.current() == lex_slash) + { + _lexer.next(); + + xpath_ast_node* n = alloc_node(ast_step_root, xpath_type_node_set); + if (!n) return 0; + + // relative location path can start from axis_attribute, dot, double_dot, multiply and string lexemes; any other lexeme means standalone root path + lexeme_t l = _lexer.current(); + + if (l == lex_string || l == lex_axis_attribute || l == lex_dot || l == lex_double_dot || l == lex_multiply) + return parse_relative_location_path(n); + else + return n; + } + else if (_lexer.current() == lex_double_slash) + { + _lexer.next(); + + xpath_ast_node* n = alloc_node(ast_step_root, xpath_type_node_set); + if (!n) return 0; + + n = alloc_node(ast_step, n, axis_descendant_or_self, nodetest_type_node, 0); + if (!n) return 0; + + return parse_relative_location_path(n); + } + + // else clause moved outside of if because of bogus warning 'control may reach end of non-void function being inlined' in gcc 4.0.1 + return parse_relative_location_path(0); + } + + // PathExpr ::= LocationPath + // | FilterExpr + // | FilterExpr '/' RelativeLocationPath + // | FilterExpr '//' RelativeLocationPath + // UnionExpr ::= PathExpr | UnionExpr '|' PathExpr + // UnaryExpr ::= UnionExpr | '-' UnaryExpr + xpath_ast_node* parse_path_or_unary_expression() + { + // Clarification. + // PathExpr begins with either LocationPath or FilterExpr. + // FilterExpr begins with PrimaryExpr + // PrimaryExpr begins with '$' in case of it being a variable reference, + // '(' in case of it being an expression, string literal, number constant or + // function call. 
+ if (_lexer.current() == lex_var_ref || _lexer.current() == lex_open_brace || + _lexer.current() == lex_quoted_string || _lexer.current() == lex_number || + _lexer.current() == lex_string) + { + if (_lexer.current() == lex_string) + { + // This is either a function call, or not - if not, we shall proceed with location path + const char_t* state = _lexer.state(); + + while (PUGI__IS_CHARTYPE(*state, ct_space)) ++state; + + if (*state != '(') + return parse_location_path(); + + // This looks like a function call; however this still can be a node-test. Check it. + if (parse_node_test_type(_lexer.contents()) != nodetest_none) + return parse_location_path(); + } + + xpath_ast_node* n = parse_filter_expression(); + if (!n) return 0; + + if (_lexer.current() == lex_slash || _lexer.current() == lex_double_slash) + { + lexeme_t l = _lexer.current(); + _lexer.next(); + + if (l == lex_double_slash) + { + if (n->rettype() != xpath_type_node_set) + return error("Step has to be applied to node set"); + + n = alloc_node(ast_step, n, axis_descendant_or_self, nodetest_type_node, 0); + if (!n) return 0; + } + + // select from location path + return parse_relative_location_path(n); + } + + return n; + } + else if (_lexer.current() == lex_minus) + { + _lexer.next(); + + // precedence 7+ - only parses union expressions + xpath_ast_node* n = parse_expression(7); + if (!n) return 0; + + return alloc_node(ast_op_negate, xpath_type_number, n); + } + else + { + return parse_location_path(); + } + } + + struct binary_op_t + { + ast_type_t asttype; + xpath_value_type rettype; + int precedence; + + binary_op_t(): asttype(ast_unknown), rettype(xpath_type_none), precedence(0) + { + } + + binary_op_t(ast_type_t asttype_, xpath_value_type rettype_, int precedence_): asttype(asttype_), rettype(rettype_), precedence(precedence_) + { + } + + static binary_op_t parse(xpath_lexer& lexer) + { + switch (lexer.current()) + { + case lex_string: + if (lexer.contents() == PUGIXML_TEXT("or")) + return 
binary_op_t(ast_op_or, xpath_type_boolean, 1); + else if (lexer.contents() == PUGIXML_TEXT("and")) + return binary_op_t(ast_op_and, xpath_type_boolean, 2); + else if (lexer.contents() == PUGIXML_TEXT("div")) + return binary_op_t(ast_op_divide, xpath_type_number, 6); + else if (lexer.contents() == PUGIXML_TEXT("mod")) + return binary_op_t(ast_op_mod, xpath_type_number, 6); + else + return binary_op_t(); + + case lex_equal: + return binary_op_t(ast_op_equal, xpath_type_boolean, 3); + + case lex_not_equal: + return binary_op_t(ast_op_not_equal, xpath_type_boolean, 3); + + case lex_less: + return binary_op_t(ast_op_less, xpath_type_boolean, 4); + + case lex_greater: + return binary_op_t(ast_op_greater, xpath_type_boolean, 4); + + case lex_less_or_equal: + return binary_op_t(ast_op_less_or_equal, xpath_type_boolean, 4); + + case lex_greater_or_equal: + return binary_op_t(ast_op_greater_or_equal, xpath_type_boolean, 4); + + case lex_plus: + return binary_op_t(ast_op_add, xpath_type_number, 5); + + case lex_minus: + return binary_op_t(ast_op_subtract, xpath_type_number, 5); + + case lex_multiply: + return binary_op_t(ast_op_multiply, xpath_type_number, 6); + + case lex_union: + return binary_op_t(ast_op_union, xpath_type_node_set, 7); + + default: + return binary_op_t(); + } + } + }; + + xpath_ast_node* parse_expression_rec(xpath_ast_node* lhs, int limit) + { + binary_op_t op = binary_op_t::parse(_lexer); + + while (op.asttype != ast_unknown && op.precedence >= limit) + { + _lexer.next(); + + if (++_depth > xpath_ast_depth_limit) + return error_rec(); + + xpath_ast_node* rhs = parse_path_or_unary_expression(); + if (!rhs) return 0; + + binary_op_t nextop = binary_op_t::parse(_lexer); + + while (nextop.asttype != ast_unknown && nextop.precedence > op.precedence) + { + rhs = parse_expression_rec(rhs, nextop.precedence); + if (!rhs) return 0; + + nextop = binary_op_t::parse(_lexer); + } + + if (op.asttype == ast_op_union && (lhs->rettype() != xpath_type_node_set || 
rhs->rettype() != xpath_type_node_set)) + return error("Union operator has to be applied to node sets"); + + lhs = alloc_node(op.asttype, op.rettype, lhs, rhs); + if (!lhs) return 0; + + op = binary_op_t::parse(_lexer); + } + + return lhs; + } + + // Expr ::= OrExpr + // OrExpr ::= AndExpr | OrExpr 'or' AndExpr + // AndExpr ::= EqualityExpr | AndExpr 'and' EqualityExpr + // EqualityExpr ::= RelationalExpr + // | EqualityExpr '=' RelationalExpr + // | EqualityExpr '!=' RelationalExpr + // RelationalExpr ::= AdditiveExpr + // | RelationalExpr '<' AdditiveExpr + // | RelationalExpr '>' AdditiveExpr + // | RelationalExpr '<=' AdditiveExpr + // | RelationalExpr '>=' AdditiveExpr + // AdditiveExpr ::= MultiplicativeExpr + // | AdditiveExpr '+' MultiplicativeExpr + // | AdditiveExpr '-' MultiplicativeExpr + // MultiplicativeExpr ::= UnaryExpr + // | MultiplicativeExpr '*' UnaryExpr + // | MultiplicativeExpr 'div' UnaryExpr + // | MultiplicativeExpr 'mod' UnaryExpr + xpath_ast_node* parse_expression(int limit = 0) + { + size_t old_depth = _depth; + + if (++_depth > xpath_ast_depth_limit) + return error_rec(); + + xpath_ast_node* n = parse_path_or_unary_expression(); + if (!n) return 0; + + n = parse_expression_rec(n, limit); + + _depth = old_depth; + + return n; + } + + xpath_parser(const char_t* query, xpath_variable_set* variables, xpath_allocator* alloc, xpath_parse_result* result): _alloc(alloc), _lexer(query), _query(query), _variables(variables), _result(result), _depth(0) + { + } + + xpath_ast_node* parse() + { + xpath_ast_node* n = parse_expression(); + if (!n) return 0; + + assert(_depth == 0); + + // check if there are unparsed tokens left + if (_lexer.current() != lex_eof) + return error("Incorrect query"); + + return n; + } + + static xpath_ast_node* parse(const char_t* query, xpath_variable_set* variables, xpath_allocator* alloc, xpath_parse_result* result) + { + xpath_parser parser(query, variables, alloc, result); + + return parser.parse(); + } + }; + + 
struct xpath_query_impl + { + static xpath_query_impl* create() + { + void* memory = xml_memory::allocate(sizeof(xpath_query_impl)); + if (!memory) return 0; + + return new (memory) xpath_query_impl(); + } + + static void destroy(xpath_query_impl* impl) + { + // free all allocated pages + impl->alloc.release(); + + // free allocator memory (with the first page) + xml_memory::deallocate(impl); + } + + xpath_query_impl(): root(0), alloc(&block, &oom), oom(false) + { + block.next = 0; + block.capacity = sizeof(block.data); + } + + xpath_ast_node* root; + xpath_allocator alloc; + xpath_memory_block block; + bool oom; + }; + + PUGI__FN impl::xpath_ast_node* evaluate_node_set_prepare(xpath_query_impl* impl) + { + if (!impl) return 0; + + if (impl->root->rettype() != xpath_type_node_set) + { + #ifdef PUGIXML_NO_EXCEPTIONS + return 0; + #else + xpath_parse_result res; + res.error = "Expression does not evaluate to node set"; + + throw xpath_exception(res); + #endif + } + + return impl->root; + } +PUGI__NS_END + +namespace pugi +{ +#ifndef PUGIXML_NO_EXCEPTIONS + PUGI__FN xpath_exception::xpath_exception(const xpath_parse_result& result_): _result(result_) + { + assert(_result.error); + } + + PUGI__FN const char* xpath_exception::what() const throw() + { + return _result.error; + } + + PUGI__FN const xpath_parse_result& xpath_exception::result() const + { + return _result; + } +#endif + + PUGI__FN xpath_node::xpath_node() + { + } + + PUGI__FN xpath_node::xpath_node(const xml_node& node_): _node(node_) + { + } + + PUGI__FN xpath_node::xpath_node(const xml_attribute& attribute_, const xml_node& parent_): _node(attribute_ ? parent_ : xml_node()), _attribute(attribute_) + { + } + + PUGI__FN xml_node xpath_node::node() const + { + return _attribute ? xml_node() : _node; + } + + PUGI__FN xml_attribute xpath_node::attribute() const + { + return _attribute; + } + + PUGI__FN xml_node xpath_node::parent() const + { + return _attribute ? 
_node : _node.parent(); + } + + PUGI__FN static void unspecified_bool_xpath_node(xpath_node***) + { + } + + PUGI__FN xpath_node::operator xpath_node::unspecified_bool_type() const + { + return (_node || _attribute) ? unspecified_bool_xpath_node : 0; + } + + PUGI__FN bool xpath_node::operator!() const + { + return !(_node || _attribute); + } + + PUGI__FN bool xpath_node::operator==(const xpath_node& n) const + { + return _node == n._node && _attribute == n._attribute; + } + + PUGI__FN bool xpath_node::operator!=(const xpath_node& n) const + { + return _node != n._node || _attribute != n._attribute; + } + +#ifdef __BORLANDC__ + PUGI__FN bool operator&&(const xpath_node& lhs, bool rhs) + { + return (bool)lhs && rhs; + } + + PUGI__FN bool operator||(const xpath_node& lhs, bool rhs) + { + return (bool)lhs || rhs; + } +#endif + + PUGI__FN void xpath_node_set::_assign(const_iterator begin_, const_iterator end_, type_t type_) + { + assert(begin_ <= end_); + + size_t size_ = static_cast(end_ - begin_); + + // use internal buffer for 0 or 1 elements, heap buffer otherwise + xpath_node* storage = (size_ <= 1) ? _storage : static_cast(impl::xml_memory::allocate(size_ * sizeof(xpath_node))); + + if (!storage) + { + #ifdef PUGIXML_NO_EXCEPTIONS + return; + #else + throw std::bad_alloc(); + #endif + } + + // deallocate old buffer + if (_begin != _storage) + impl::xml_memory::deallocate(_begin); + + // size check is necessary because for begin_ = end_ = nullptr, memcpy is UB + if (size_) + memcpy(storage, begin_, size_ * sizeof(xpath_node)); + + _begin = storage; + _end = storage + size_; + _type = type_; + } + +#ifdef PUGIXML_HAS_MOVE + PUGI__FN void xpath_node_set::_move(xpath_node_set& rhs) PUGIXML_NOEXCEPT + { + _type = rhs._type; + _storage[0] = rhs._storage[0]; + _begin = (rhs._begin == rhs._storage) ? 
_storage : rhs._begin; + _end = _begin + (rhs._end - rhs._begin); + + rhs._type = type_unsorted; + rhs._begin = rhs._storage; + rhs._end = rhs._storage; + } +#endif + + PUGI__FN xpath_node_set::xpath_node_set(): _type(type_unsorted), _begin(_storage), _end(_storage) + { + } + + PUGI__FN xpath_node_set::xpath_node_set(const_iterator begin_, const_iterator end_, type_t type_): _type(type_unsorted), _begin(_storage), _end(_storage) + { + _assign(begin_, end_, type_); + } + + PUGI__FN xpath_node_set::~xpath_node_set() + { + if (_begin != _storage) + impl::xml_memory::deallocate(_begin); + } + + PUGI__FN xpath_node_set::xpath_node_set(const xpath_node_set& ns): _type(type_unsorted), _begin(_storage), _end(_storage) + { + _assign(ns._begin, ns._end, ns._type); + } + + PUGI__FN xpath_node_set& xpath_node_set::operator=(const xpath_node_set& ns) + { + if (this == &ns) return *this; + + _assign(ns._begin, ns._end, ns._type); + + return *this; + } + +#ifdef PUGIXML_HAS_MOVE + PUGI__FN xpath_node_set::xpath_node_set(xpath_node_set&& rhs) PUGIXML_NOEXCEPT: _type(type_unsorted), _begin(_storage), _end(_storage) + { + _move(rhs); + } + + PUGI__FN xpath_node_set& xpath_node_set::operator=(xpath_node_set&& rhs) PUGIXML_NOEXCEPT + { + if (this == &rhs) return *this; + + if (_begin != _storage) + impl::xml_memory::deallocate(_begin); + + _move(rhs); + + return *this; + } +#endif + + PUGI__FN xpath_node_set::type_t xpath_node_set::type() const + { + return _type; + } + + PUGI__FN size_t xpath_node_set::size() const + { + return _end - _begin; + } + + PUGI__FN bool xpath_node_set::empty() const + { + return _begin == _end; + } + + PUGI__FN const xpath_node& xpath_node_set::operator[](size_t index) const + { + assert(index < size()); + return _begin[index]; + } + + PUGI__FN xpath_node_set::const_iterator xpath_node_set::begin() const + { + return _begin; + } + + PUGI__FN xpath_node_set::const_iterator xpath_node_set::end() const + { + return _end; + } + + PUGI__FN void 
xpath_node_set::sort(bool reverse) + { + _type = impl::xpath_sort(_begin, _end, _type, reverse); + } + + PUGI__FN xpath_node xpath_node_set::first() const + { + return impl::xpath_first(_begin, _end, _type); + } + + PUGI__FN xpath_parse_result::xpath_parse_result(): error("Internal error"), offset(0) + { + } + + PUGI__FN xpath_parse_result::operator bool() const + { + return error == 0; + } + + PUGI__FN const char* xpath_parse_result::description() const + { + return error ? error : "No error"; + } + + PUGI__FN xpath_variable::xpath_variable(xpath_value_type type_): _type(type_), _next(0) + { + } + + PUGI__FN const char_t* xpath_variable::name() const + { + switch (_type) + { + case xpath_type_node_set: + return static_cast(this)->name; + + case xpath_type_number: + return static_cast(this)->name; + + case xpath_type_string: + return static_cast(this)->name; + + case xpath_type_boolean: + return static_cast(this)->name; + + default: + assert(false && "Invalid variable type"); // unreachable + return 0; + } + } + + PUGI__FN xpath_value_type xpath_variable::type() const + { + return _type; + } + + PUGI__FN bool xpath_variable::get_boolean() const + { + return (_type == xpath_type_boolean) ? static_cast(this)->value : false; + } + + PUGI__FN double xpath_variable::get_number() const + { + return (_type == xpath_type_number) ? static_cast(this)->value : impl::gen_nan(); + } + + PUGI__FN const char_t* xpath_variable::get_string() const + { + const char_t* value = (_type == xpath_type_string) ? static_cast(this)->value : 0; + return value ? value : PUGIXML_TEXT(""); + } + + PUGI__FN const xpath_node_set& xpath_variable::get_node_set() const + { + return (_type == xpath_type_node_set) ? 
static_cast(this)->value : impl::dummy_node_set; + } + + PUGI__FN bool xpath_variable::set(bool value) + { + if (_type != xpath_type_boolean) return false; + + static_cast(this)->value = value; + return true; + } + + PUGI__FN bool xpath_variable::set(double value) + { + if (_type != xpath_type_number) return false; + + static_cast(this)->value = value; + return true; + } + + PUGI__FN bool xpath_variable::set(const char_t* value) + { + if (_type != xpath_type_string) return false; + + impl::xpath_variable_string* var = static_cast(this); + + // duplicate string + size_t size = (impl::strlength(value) + 1) * sizeof(char_t); + + char_t* copy = static_cast(impl::xml_memory::allocate(size)); + if (!copy) return false; + + memcpy(copy, value, size); + + // replace old string + if (var->value) impl::xml_memory::deallocate(var->value); + var->value = copy; + + return true; + } + + PUGI__FN bool xpath_variable::set(const xpath_node_set& value) + { + if (_type != xpath_type_node_set) return false; + + static_cast(this)->value = value; + return true; + } + + PUGI__FN xpath_variable_set::xpath_variable_set() + { + for (size_t i = 0; i < sizeof(_data) / sizeof(_data[0]); ++i) + _data[i] = 0; + } + + PUGI__FN xpath_variable_set::~xpath_variable_set() + { + for (size_t i = 0; i < sizeof(_data) / sizeof(_data[0]); ++i) + _destroy(_data[i]); + } + + PUGI__FN xpath_variable_set::xpath_variable_set(const xpath_variable_set& rhs) + { + for (size_t i = 0; i < sizeof(_data) / sizeof(_data[0]); ++i) + _data[i] = 0; + + _assign(rhs); + } + + PUGI__FN xpath_variable_set& xpath_variable_set::operator=(const xpath_variable_set& rhs) + { + if (this == &rhs) return *this; + + _assign(rhs); + + return *this; + } + +#ifdef PUGIXML_HAS_MOVE + PUGI__FN xpath_variable_set::xpath_variable_set(xpath_variable_set&& rhs) PUGIXML_NOEXCEPT + { + for (size_t i = 0; i < sizeof(_data) / sizeof(_data[0]); ++i) + { + _data[i] = rhs._data[i]; + rhs._data[i] = 0; + } + } + + PUGI__FN xpath_variable_set& 
xpath_variable_set::operator=(xpath_variable_set&& rhs) PUGIXML_NOEXCEPT + { + for (size_t i = 0; i < sizeof(_data) / sizeof(_data[0]); ++i) + { + _destroy(_data[i]); + + _data[i] = rhs._data[i]; + rhs._data[i] = 0; + } + + return *this; + } +#endif + + PUGI__FN void xpath_variable_set::_assign(const xpath_variable_set& rhs) + { + xpath_variable_set temp; + + for (size_t i = 0; i < sizeof(_data) / sizeof(_data[0]); ++i) + if (rhs._data[i] && !_clone(rhs._data[i], &temp._data[i])) + return; + + _swap(temp); + } + + PUGI__FN void xpath_variable_set::_swap(xpath_variable_set& rhs) + { + for (size_t i = 0; i < sizeof(_data) / sizeof(_data[0]); ++i) + { + xpath_variable* chain = _data[i]; + + _data[i] = rhs._data[i]; + rhs._data[i] = chain; + } + } + + PUGI__FN xpath_variable* xpath_variable_set::_find(const char_t* name) const + { + const size_t hash_size = sizeof(_data) / sizeof(_data[0]); + size_t hash = impl::hash_string(name) % hash_size; + + // look for existing variable + for (xpath_variable* var = _data[hash]; var; var = var->_next) + if (impl::strequal(var->name(), name)) + return var; + + return 0; + } + + PUGI__FN bool xpath_variable_set::_clone(xpath_variable* var, xpath_variable** out_result) + { + xpath_variable* last = 0; + + while (var) + { + // allocate storage for new variable + xpath_variable* nvar = impl::new_xpath_variable(var->_type, var->name()); + if (!nvar) return false; + + // link the variable to the result immediately to handle failures gracefully + if (last) + last->_next = nvar; + else + *out_result = nvar; + + last = nvar; + + // copy the value; this can fail due to out-of-memory conditions + if (!impl::copy_xpath_variable(nvar, var)) return false; + + var = var->_next; + } + + return true; + } + + PUGI__FN void xpath_variable_set::_destroy(xpath_variable* var) + { + while (var) + { + xpath_variable* next = var->_next; + + impl::delete_xpath_variable(var->_type, var); + + var = next; + } + } + + PUGI__FN xpath_variable* 
xpath_variable_set::add(const char_t* name, xpath_value_type type) + { + const size_t hash_size = sizeof(_data) / sizeof(_data[0]); + size_t hash = impl::hash_string(name) % hash_size; + + // look for existing variable + for (xpath_variable* var = _data[hash]; var; var = var->_next) + if (impl::strequal(var->name(), name)) + return var->type() == type ? var : 0; + + // add new variable + xpath_variable* result = impl::new_xpath_variable(type, name); + + if (result) + { + result->_next = _data[hash]; + + _data[hash] = result; + } + + return result; + } + + PUGI__FN bool xpath_variable_set::set(const char_t* name, bool value) + { + xpath_variable* var = add(name, xpath_type_boolean); + return var ? var->set(value) : false; + } + + PUGI__FN bool xpath_variable_set::set(const char_t* name, double value) + { + xpath_variable* var = add(name, xpath_type_number); + return var ? var->set(value) : false; + } + + PUGI__FN bool xpath_variable_set::set(const char_t* name, const char_t* value) + { + xpath_variable* var = add(name, xpath_type_string); + return var ? var->set(value) : false; + } + + PUGI__FN bool xpath_variable_set::set(const char_t* name, const xpath_node_set& value) + { + xpath_variable* var = add(name, xpath_type_node_set); + return var ? 
var->set(value) : false; + } + + PUGI__FN xpath_variable* xpath_variable_set::get(const char_t* name) + { + return _find(name); + } + + PUGI__FN const xpath_variable* xpath_variable_set::get(const char_t* name) const + { + return _find(name); + } + + PUGI__FN xpath_query::xpath_query(const char_t* query, xpath_variable_set* variables): _impl(0) + { + impl::xpath_query_impl* qimpl = impl::xpath_query_impl::create(); + + if (!qimpl) + { + #ifdef PUGIXML_NO_EXCEPTIONS + _result.error = "Out of memory"; + #else + throw std::bad_alloc(); + #endif + } + else + { + using impl::auto_deleter; // MSVC7 workaround + auto_deleter impl(qimpl, impl::xpath_query_impl::destroy); + + qimpl->root = impl::xpath_parser::parse(query, variables, &qimpl->alloc, &_result); + + if (qimpl->root) + { + qimpl->root->optimize(&qimpl->alloc); + + _impl = impl.release(); + _result.error = 0; + } + else + { + #ifdef PUGIXML_NO_EXCEPTIONS + if (qimpl->oom) _result.error = "Out of memory"; + #else + if (qimpl->oom) throw std::bad_alloc(); + throw xpath_exception(_result); + #endif + } + } + } + + PUGI__FN xpath_query::xpath_query(): _impl(0) + { + } + + PUGI__FN xpath_query::~xpath_query() + { + if (_impl) + impl::xpath_query_impl::destroy(static_cast(_impl)); + } + +#ifdef PUGIXML_HAS_MOVE + PUGI__FN xpath_query::xpath_query(xpath_query&& rhs) PUGIXML_NOEXCEPT + { + _impl = rhs._impl; + _result = rhs._result; + rhs._impl = 0; + rhs._result = xpath_parse_result(); + } + + PUGI__FN xpath_query& xpath_query::operator=(xpath_query&& rhs) PUGIXML_NOEXCEPT + { + if (this == &rhs) return *this; + + if (_impl) + impl::xpath_query_impl::destroy(static_cast(_impl)); + + _impl = rhs._impl; + _result = rhs._result; + rhs._impl = 0; + rhs._result = xpath_parse_result(); + + return *this; + } +#endif + + PUGI__FN xpath_value_type xpath_query::return_type() const + { + if (!_impl) return xpath_type_none; + + return static_cast(_impl)->root->rettype(); + } + + PUGI__FN bool xpath_query::evaluate_boolean(const 
xpath_node& n) const + { + if (!_impl) return false; + + impl::xpath_context c(n, 1, 1); + impl::xpath_stack_data sd; + + bool r = static_cast(_impl)->root->eval_boolean(c, sd.stack); + + if (sd.oom) + { + #ifdef PUGIXML_NO_EXCEPTIONS + return false; + #else + throw std::bad_alloc(); + #endif + } + + return r; + } + + PUGI__FN double xpath_query::evaluate_number(const xpath_node& n) const + { + if (!_impl) return impl::gen_nan(); + + impl::xpath_context c(n, 1, 1); + impl::xpath_stack_data sd; + + double r = static_cast(_impl)->root->eval_number(c, sd.stack); + + if (sd.oom) + { + #ifdef PUGIXML_NO_EXCEPTIONS + return impl::gen_nan(); + #else + throw std::bad_alloc(); + #endif + } + + return r; + } + +#ifndef PUGIXML_NO_STL + PUGI__FN string_t xpath_query::evaluate_string(const xpath_node& n) const + { + if (!_impl) return string_t(); + + impl::xpath_context c(n, 1, 1); + impl::xpath_stack_data sd; + + impl::xpath_string r = static_cast(_impl)->root->eval_string(c, sd.stack); + + if (sd.oom) + { + #ifdef PUGIXML_NO_EXCEPTIONS + return string_t(); + #else + throw std::bad_alloc(); + #endif + } + + return string_t(r.c_str(), r.length()); + } +#endif + + PUGI__FN size_t xpath_query::evaluate_string(char_t* buffer, size_t capacity, const xpath_node& n) const + { + impl::xpath_context c(n, 1, 1); + impl::xpath_stack_data sd; + + impl::xpath_string r = _impl ? static_cast(_impl)->root->eval_string(c, sd.stack) : impl::xpath_string(); + + if (sd.oom) + { + #ifdef PUGIXML_NO_EXCEPTIONS + r = impl::xpath_string(); + #else + throw std::bad_alloc(); + #endif + } + + size_t full_size = r.length() + 1; + + if (capacity > 0) + { + size_t size = (full_size < capacity) ? 
full_size : capacity; + assert(size > 0); + + memcpy(buffer, r.c_str(), (size - 1) * sizeof(char_t)); + buffer[size - 1] = 0; + } + + return full_size; + } + + PUGI__FN xpath_node_set xpath_query::evaluate_node_set(const xpath_node& n) const + { + impl::xpath_ast_node* root = impl::evaluate_node_set_prepare(static_cast(_impl)); + if (!root) return xpath_node_set(); + + impl::xpath_context c(n, 1, 1); + impl::xpath_stack_data sd; + + impl::xpath_node_set_raw r = root->eval_node_set(c, sd.stack, impl::nodeset_eval_all); + + if (sd.oom) + { + #ifdef PUGIXML_NO_EXCEPTIONS + return xpath_node_set(); + #else + throw std::bad_alloc(); + #endif + } + + return xpath_node_set(r.begin(), r.end(), r.type()); + } + + PUGI__FN xpath_node xpath_query::evaluate_node(const xpath_node& n) const + { + impl::xpath_ast_node* root = impl::evaluate_node_set_prepare(static_cast(_impl)); + if (!root) return xpath_node(); + + impl::xpath_context c(n, 1, 1); + impl::xpath_stack_data sd; + + impl::xpath_node_set_raw r = root->eval_node_set(c, sd.stack, impl::nodeset_eval_first); + + if (sd.oom) + { + #ifdef PUGIXML_NO_EXCEPTIONS + return xpath_node(); + #else + throw std::bad_alloc(); + #endif + } + + return r.first(); + } + + PUGI__FN const xpath_parse_result& xpath_query::result() const + { + return _result; + } + + PUGI__FN static void unspecified_bool_xpath_query(xpath_query***) + { + } + + PUGI__FN xpath_query::operator xpath_query::unspecified_bool_type() const + { + return _impl ? 
unspecified_bool_xpath_query : 0; + } + + PUGI__FN bool xpath_query::operator!() const + { + return !_impl; + } + + PUGI__FN xpath_node xml_node::select_node(const char_t* query, xpath_variable_set* variables) const + { + xpath_query q(query, variables); + return q.evaluate_node(*this); + } + + PUGI__FN xpath_node xml_node::select_node(const xpath_query& query) const + { + return query.evaluate_node(*this); + } + + PUGI__FN xpath_node_set xml_node::select_nodes(const char_t* query, xpath_variable_set* variables) const + { + xpath_query q(query, variables); + return q.evaluate_node_set(*this); + } + + PUGI__FN xpath_node_set xml_node::select_nodes(const xpath_query& query) const + { + return query.evaluate_node_set(*this); + } + + PUGI__FN xpath_node xml_node::select_single_node(const char_t* query, xpath_variable_set* variables) const + { + xpath_query q(query, variables); + return q.evaluate_node(*this); + } + + PUGI__FN xpath_node xml_node::select_single_node(const xpath_query& query) const + { + return query.evaluate_node(*this); + } +} + +#endif + +#ifdef __BORLANDC__ +# pragma option pop +#endif + +// Intel C++ does not properly keep warning state for function templates, +// so popping warning state at the end of translation unit leads to warnings in the middle. 
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +# pragma warning(pop) +#endif + +#if defined(_MSC_VER) && defined(__c2__) +# pragma clang diagnostic pop +#endif + +// Undefine all local macros (makes sure we're not leaking macros in header-only mode) +#undef PUGI__NO_INLINE +#undef PUGI__UNLIKELY +#undef PUGI__STATIC_ASSERT +#undef PUGI__DMC_VOLATILE +#undef PUGI__UNSIGNED_OVERFLOW +#undef PUGI__MSVC_CRT_VERSION +#undef PUGI__SNPRINTF +#undef PUGI__NS_BEGIN +#undef PUGI__NS_END +#undef PUGI__FN +#undef PUGI__FN_NO_INLINE +#undef PUGI__GETHEADER_IMPL +#undef PUGI__GETPAGE_IMPL +#undef PUGI__GETPAGE +#undef PUGI__NODETYPE +#undef PUGI__IS_CHARTYPE_IMPL +#undef PUGI__IS_CHARTYPE +#undef PUGI__IS_CHARTYPEX +#undef PUGI__ENDSWITH +#undef PUGI__SKIPWS +#undef PUGI__OPTSET +#undef PUGI__PUSHNODE +#undef PUGI__POPNODE +#undef PUGI__SCANFOR +#undef PUGI__SCANWHILE +#undef PUGI__SCANWHILE_UNROLL +#undef PUGI__ENDSEG +#undef PUGI__THROW_ERROR +#undef PUGI__CHECK_ERROR + +#endif + +/** + * Copyright (c) 2006-2022 Arseny Kapoulkine + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ diff --git a/third_party/snowpack/plugins/pugixml/pugixml.hpp b/third_party/snowpack/plugins/pugixml/pugixml.hpp new file mode 100644 index 00000000..579f1439 --- /dev/null +++ b/third_party/snowpack/plugins/pugixml/pugixml.hpp @@ -0,0 +1,1501 @@ +/** + * pugixml parser - version 1.12 + * -------------------------------------------------------- + * Copyright (C) 2006-2022, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com) + * Report bugs and download new versions at https://pugixml.org/ + * + * This library is distributed under the MIT License. See notice at the end + * of this file. + * + * This work is based on the pugxml parser, which is: + * Copyright (C) 2003, by Kristen Wegner (kristen@tima.net) + */ + +// Define version macro; evaluates to major * 1000 + minor * 10 + patch so that it's safe to use in less-than comparisons +// Note: pugixml used major * 100 + minor * 10 + patch format up until 1.9 (which had version identifier 190); starting from pugixml 1.10, the minor version number is two digits +#ifndef PUGIXML_VERSION +# define PUGIXML_VERSION 1120 // 1.12 +#endif + +// Include user configuration file (this can define various configuration macros) +#include "pugiconfig.hpp" + +#ifndef HEADER_PUGIXML_HPP +#define HEADER_PUGIXML_HPP + +// Include stddef.h for size_t and ptrdiff_t +#include + +// Include exception header for XPath +#if !defined(PUGIXML_NO_XPATH) && !defined(PUGIXML_NO_EXCEPTIONS) +# include +#endif + +// Include STL headers +#ifndef PUGIXML_NO_STL +# include +# include +# include +#endif + +// Macro for deprecated features +#ifndef PUGIXML_DEPRECATED +# if defined(__GNUC__) +# define PUGIXML_DEPRECATED __attribute__((deprecated)) +# elif defined(_MSC_VER) && 
_MSC_VER >= 1300 +# define PUGIXML_DEPRECATED __declspec(deprecated) +# else +# define PUGIXML_DEPRECATED +# endif +#endif + +// If no API is defined, assume default +#ifndef PUGIXML_API +# define PUGIXML_API +#endif + +// If no API for classes is defined, assume default +#ifndef PUGIXML_CLASS +# define PUGIXML_CLASS PUGIXML_API +#endif + +// If no API for functions is defined, assume default +#ifndef PUGIXML_FUNCTION +# define PUGIXML_FUNCTION PUGIXML_API +#endif + +// If the platform is known to have long long support, enable long long functions +#ifndef PUGIXML_HAS_LONG_LONG +# if __cplusplus >= 201103 +# define PUGIXML_HAS_LONG_LONG +# elif defined(_MSC_VER) && _MSC_VER >= 1400 +# define PUGIXML_HAS_LONG_LONG +# endif +#endif + +// If the platform is known to have move semantics support, compile move ctor/operator implementation +#ifndef PUGIXML_HAS_MOVE +# if __cplusplus >= 201103 +# define PUGIXML_HAS_MOVE +# elif defined(_MSC_VER) && _MSC_VER >= 1600 +# define PUGIXML_HAS_MOVE +# endif +#endif + +// If C++ is 2011 or higher, add 'noexcept' specifiers +#ifndef PUGIXML_NOEXCEPT +# if __cplusplus >= 201103 +# define PUGIXML_NOEXCEPT noexcept +# elif defined(_MSC_VER) && _MSC_VER >= 1900 +# define PUGIXML_NOEXCEPT noexcept +# else +# define PUGIXML_NOEXCEPT +# endif +#endif + +// Some functions can not be noexcept in compact mode +#ifdef PUGIXML_COMPACT +# define PUGIXML_NOEXCEPT_IF_NOT_COMPACT +#else +# define PUGIXML_NOEXCEPT_IF_NOT_COMPACT PUGIXML_NOEXCEPT +#endif + +// If C++ is 2011 or higher, add 'override' qualifiers +#ifndef PUGIXML_OVERRIDE +# if __cplusplus >= 201103 +# define PUGIXML_OVERRIDE override +# elif defined(_MSC_VER) && _MSC_VER >= 1700 +# define PUGIXML_OVERRIDE override +# else +# define PUGIXML_OVERRIDE +# endif +#endif + +// If C++ is 2011 or higher, use 'nullptr' +#ifndef PUGIXML_NULL +# if __cplusplus >= 201103 +# define PUGIXML_NULL nullptr +# else +# define PUGIXML_NULL 0 +# endif +#endif + +// Character interface macros +#ifdef 
PUGIXML_WCHAR_MODE +# define PUGIXML_TEXT(t) L ## t +# define PUGIXML_CHAR wchar_t +#else +# define PUGIXML_TEXT(t) t +# define PUGIXML_CHAR char +#endif + +namespace pugi +{ + // Character type used for all internal storage and operations; depends on PUGIXML_WCHAR_MODE + typedef PUGIXML_CHAR char_t; + +#ifndef PUGIXML_NO_STL + // String type used for operations that work with STL string; depends on PUGIXML_WCHAR_MODE + typedef std::basic_string, std::allocator > string_t; +#endif +} + +// The PugiXML namespace +namespace pugi +{ + // Tree node types + enum xml_node_type + { + node_null, // Empty (null) node handle + node_document, // A document tree's absolute root + node_element, // Element tag, i.e. '' + node_pcdata, // Plain character data, i.e. 'text' + node_cdata, // Character data, i.e. '' + node_comment, // Comment tag, i.e. '' + node_pi, // Processing instruction, i.e. '' + node_declaration, // Document declaration, i.e. '' + node_doctype // Document type declaration, i.e. '' + }; + + // Parsing options + + // Minimal parsing mode (equivalent to turning all other flags off). + // Only elements and PCDATA sections are added to the DOM tree, no text conversions are performed. + const unsigned int parse_minimal = 0x0000; + + // This flag determines if processing instructions (node_pi) are added to the DOM tree. This flag is off by default. + const unsigned int parse_pi = 0x0001; + + // This flag determines if comments (node_comment) are added to the DOM tree. This flag is off by default. + const unsigned int parse_comments = 0x0002; + + // This flag determines if CDATA sections (node_cdata) are added to the DOM tree. This flag is on by default. + const unsigned int parse_cdata = 0x0004; + + // This flag determines if plain character data (node_pcdata) that consist only of whitespace are added to the DOM tree. + // This flag is off by default; turning it on usually results in slower parsing and more memory consumption. 
+ const unsigned int parse_ws_pcdata = 0x0008; + + // This flag determines if character and entity references are expanded during parsing. This flag is on by default. + const unsigned int parse_escapes = 0x0010; + + // This flag determines if EOL characters are normalized (converted to #xA) during parsing. This flag is on by default. + const unsigned int parse_eol = 0x0020; + + // This flag determines if attribute values are normalized using CDATA normalization rules during parsing. This flag is on by default. + const unsigned int parse_wconv_attribute = 0x0040; + + // This flag determines if attribute values are normalized using NMTOKENS normalization rules during parsing. This flag is off by default. + const unsigned int parse_wnorm_attribute = 0x0080; + + // This flag determines if document declaration (node_declaration) is added to the DOM tree. This flag is off by default. + const unsigned int parse_declaration = 0x0100; + + // This flag determines if document type declaration (node_doctype) is added to the DOM tree. This flag is off by default. + const unsigned int parse_doctype = 0x0200; + + // This flag determines if plain character data (node_pcdata) that is the only child of the parent node and that consists only + // of whitespace is added to the DOM tree. + // This flag is off by default; turning it on may result in slower parsing and more memory consumption. + const unsigned int parse_ws_pcdata_single = 0x0400; + + // This flag determines if leading and trailing whitespace is to be removed from plain character data. This flag is off by default. + const unsigned int parse_trim_pcdata = 0x0800; + + // This flag determines if plain character data that does not have a parent node is added to the DOM tree, and if an empty document + // is a valid document. This flag is off by default. + const unsigned int parse_fragment = 0x1000; + + // This flag determines if plain character data is be stored in the parent element's value. 
This significantly changes the structure of + // the document; this flag is only recommended for parsing documents with many PCDATA nodes in memory-constrained environments. + // This flag is off by default. + const unsigned int parse_embed_pcdata = 0x2000; + + // The default parsing mode. + // Elements, PCDATA and CDATA sections are added to the DOM tree, character/reference entities are expanded, + // End-of-Line characters are normalized, attribute values are normalized using CDATA normalization rules. + const unsigned int parse_default = parse_cdata | parse_escapes | parse_wconv_attribute | parse_eol; + + // The full parsing mode. + // Nodes of all types are added to the DOM tree, character/reference entities are expanded, + // End-of-Line characters are normalized, attribute values are normalized using CDATA normalization rules. + const unsigned int parse_full = parse_default | parse_pi | parse_comments | parse_declaration | parse_doctype; + + // These flags determine the encoding of input data for XML document + enum xml_encoding + { + encoding_auto, // Auto-detect input encoding using BOM or < / class xml_object_range + { + public: + typedef It const_iterator; + typedef It iterator; + + xml_object_range(It b, It e): _begin(b), _end(e) + { + } + + It begin() const { return _begin; } + It end() const { return _end; } + + bool empty() const { return _begin == _end; } + + private: + It _begin, _end; + }; + + // Writer interface for node printing (see xml_node::print) + class PUGIXML_CLASS xml_writer + { + public: + virtual ~xml_writer() {} + + // Write memory chunk into stream/file/whatever + virtual void write(const void* data, size_t size) = 0; + }; + + // xml_writer implementation for FILE* + class PUGIXML_CLASS xml_writer_file: public xml_writer + { + public: + // Construct writer from a FILE* object; void* is used to avoid header dependencies on stdio + xml_writer_file(void* file); + + virtual void write(const void* data, size_t size) PUGIXML_OVERRIDE; + + 
private: + void* file; + }; + + #ifndef PUGIXML_NO_STL + // xml_writer implementation for streams + class PUGIXML_CLASS xml_writer_stream: public xml_writer + { + public: + // Construct writer from an output stream object + xml_writer_stream(std::basic_ostream >& stream); + xml_writer_stream(std::basic_ostream >& stream); + + virtual void write(const void* data, size_t size) PUGIXML_OVERRIDE; + + private: + std::basic_ostream >* narrow_stream; + std::basic_ostream >* wide_stream; + }; + #endif + + // A light-weight handle for manipulating attributes in DOM tree + class PUGIXML_CLASS xml_attribute + { + friend class xml_attribute_iterator; + friend class xml_node; + + private: + xml_attribute_struct* _attr; + + typedef void (*unspecified_bool_type)(xml_attribute***); + + public: + // Default constructor. Constructs an empty attribute. + xml_attribute(); + + // Constructs attribute from internal pointer + explicit xml_attribute(xml_attribute_struct* attr); + + // Safe bool conversion operator + operator unspecified_bool_type() const; + + // Borland C++ workaround + bool operator!() const; + + // Comparison operators (compares wrapped attribute pointers) + bool operator==(const xml_attribute& r) const; + bool operator!=(const xml_attribute& r) const; + bool operator<(const xml_attribute& r) const; + bool operator>(const xml_attribute& r) const; + bool operator<=(const xml_attribute& r) const; + bool operator>=(const xml_attribute& r) const; + + // Check if attribute is empty + bool empty() const; + + // Get attribute name/value, or "" if attribute is empty + const char_t* name() const; + const char_t* value() const; + + // Get attribute value, or the default value if attribute is empty + const char_t* as_string(const char_t* def = PUGIXML_TEXT("")) const; + + // Get attribute value as a number, or the default value if conversion did not succeed or attribute is empty + int as_int(int def = 0) const; + unsigned int as_uint(unsigned int def = 0) const; + double 
as_double(double def = 0) const; + float as_float(float def = 0) const; + + #ifdef PUGIXML_HAS_LONG_LONG + long long as_llong(long long def = 0) const; + unsigned long long as_ullong(unsigned long long def = 0) const; + #endif + + // Get attribute value as bool (returns true if first character is in '1tTyY' set), or the default value if attribute is empty + bool as_bool(bool def = false) const; + + // Set attribute name/value (returns false if attribute is empty or there is not enough memory) + bool set_name(const char_t* rhs); + bool set_value(const char_t* rhs); + + // Set attribute value with type conversion (numbers are converted to strings, boolean is converted to "true"/"false") + bool set_value(int rhs); + bool set_value(unsigned int rhs); + bool set_value(long rhs); + bool set_value(unsigned long rhs); + bool set_value(double rhs); + bool set_value(double rhs, int precision); + bool set_value(float rhs); + bool set_value(float rhs, int precision); + bool set_value(bool rhs); + + #ifdef PUGIXML_HAS_LONG_LONG + bool set_value(long long rhs); + bool set_value(unsigned long long rhs); + #endif + + // Set attribute value (equivalent to set_value without error checking) + xml_attribute& operator=(const char_t* rhs); + xml_attribute& operator=(int rhs); + xml_attribute& operator=(unsigned int rhs); + xml_attribute& operator=(long rhs); + xml_attribute& operator=(unsigned long rhs); + xml_attribute& operator=(double rhs); + xml_attribute& operator=(float rhs); + xml_attribute& operator=(bool rhs); + + #ifdef PUGIXML_HAS_LONG_LONG + xml_attribute& operator=(long long rhs); + xml_attribute& operator=(unsigned long long rhs); + #endif + + // Get next/previous attribute in the attribute list of the parent node + xml_attribute next_attribute() const; + xml_attribute previous_attribute() const; + + // Get hash value (unique for handles to the same object) + size_t hash_value() const; + + // Get internal pointer + xml_attribute_struct* internal_object() const; + }; + 
+#ifdef __BORLANDC__ + // Borland C++ workaround + bool PUGIXML_FUNCTION operator&&(const xml_attribute& lhs, bool rhs); + bool PUGIXML_FUNCTION operator||(const xml_attribute& lhs, bool rhs); +#endif + + // A light-weight handle for manipulating nodes in DOM tree + class PUGIXML_CLASS xml_node + { + friend class xml_attribute_iterator; + friend class xml_node_iterator; + friend class xml_named_node_iterator; + + protected: + xml_node_struct* _root; + + typedef void (*unspecified_bool_type)(xml_node***); + + public: + // Default constructor. Constructs an empty node. + xml_node(); + + // Constructs node from internal pointer + explicit xml_node(xml_node_struct* p); + + // Safe bool conversion operator + operator unspecified_bool_type() const; + + // Borland C++ workaround + bool operator!() const; + + // Comparison operators (compares wrapped node pointers) + bool operator==(const xml_node& r) const; + bool operator!=(const xml_node& r) const; + bool operator<(const xml_node& r) const; + bool operator>(const xml_node& r) const; + bool operator<=(const xml_node& r) const; + bool operator>=(const xml_node& r) const; + + // Check if node is empty. + bool empty() const; + + // Get node type + xml_node_type type() const; + + // Get node name, or "" if node is empty or it has no name + const char_t* name() const; + + // Get node value, or "" if node is empty or it has no value + // Note: For text node.value() does not return "text"! Use child_value() or text() methods to access text inside nodes. 
+ const char_t* value() const; + + // Get attribute list + xml_attribute first_attribute() const; + xml_attribute last_attribute() const; + + // Get children list + xml_node first_child() const; + xml_node last_child() const; + + // Get next/previous sibling in the children list of the parent node + xml_node next_sibling() const; + xml_node previous_sibling() const; + + // Get parent node + xml_node parent() const; + + // Get root of DOM tree this node belongs to + xml_node root() const; + + // Get text object for the current node + xml_text text() const; + + // Get child, attribute or next/previous sibling with the specified name + xml_node child(const char_t* name) const; + xml_attribute attribute(const char_t* name) const; + xml_node next_sibling(const char_t* name) const; + xml_node previous_sibling(const char_t* name) const; + + // Get attribute, starting the search from a hint (and updating hint so that searching for a sequence of attributes is fast) + xml_attribute attribute(const char_t* name, xml_attribute& hint) const; + + // Get child value of current node; that is, value of the first child node of type PCDATA/CDATA + const char_t* child_value() const; + + // Get child value of child with specified name. Equivalent to child(name).child_value(). + const char_t* child_value(const char_t* name) const; + + // Set node name/value (returns false if node is empty, there is not enough memory, or node can not have name/value) + bool set_name(const char_t* rhs); + bool set_value(const char_t* rhs); + + // Add attribute with specified name. Returns added attribute, or empty attribute on errors. + xml_attribute append_attribute(const char_t* name); + xml_attribute prepend_attribute(const char_t* name); + xml_attribute insert_attribute_after(const char_t* name, const xml_attribute& attr); + xml_attribute insert_attribute_before(const char_t* name, const xml_attribute& attr); + + // Add a copy of the specified attribute. 
Returns added attribute, or empty attribute on errors. + xml_attribute append_copy(const xml_attribute& proto); + xml_attribute prepend_copy(const xml_attribute& proto); + xml_attribute insert_copy_after(const xml_attribute& proto, const xml_attribute& attr); + xml_attribute insert_copy_before(const xml_attribute& proto, const xml_attribute& attr); + + // Add child node with specified type. Returns added node, or empty node on errors. + xml_node append_child(xml_node_type type = node_element); + xml_node prepend_child(xml_node_type type = node_element); + xml_node insert_child_after(xml_node_type type, const xml_node& node); + xml_node insert_child_before(xml_node_type type, const xml_node& node); + + // Add child element with specified name. Returns added node, or empty node on errors. + xml_node append_child(const char_t* name); + xml_node prepend_child(const char_t* name); + xml_node insert_child_after(const char_t* name, const xml_node& node); + xml_node insert_child_before(const char_t* name, const xml_node& node); + + // Add a copy of the specified node as a child. Returns added node, or empty node on errors. + xml_node append_copy(const xml_node& proto); + xml_node prepend_copy(const xml_node& proto); + xml_node insert_copy_after(const xml_node& proto, const xml_node& node); + xml_node insert_copy_before(const xml_node& proto, const xml_node& node); + + // Move the specified node to become a child of this node. Returns moved node, or empty node on errors. 
+ xml_node append_move(const xml_node& moved); + xml_node prepend_move(const xml_node& moved); + xml_node insert_move_after(const xml_node& moved, const xml_node& node); + xml_node insert_move_before(const xml_node& moved, const xml_node& node); + + // Remove specified attribute + bool remove_attribute(const xml_attribute& a); + bool remove_attribute(const char_t* name); + + // Remove all attributes + bool remove_attributes(); + + // Remove specified child + bool remove_child(const xml_node& n); + bool remove_child(const char_t* name); + + // Remove all children + bool remove_children(); + + // Parses buffer as an XML document fragment and appends all nodes as children of the current node. + // Copies/converts the buffer, so it may be deleted or changed after the function returns. + // Note: append_buffer allocates memory that has the lifetime of the owning document; removing the appended nodes does not immediately reclaim that memory. + xml_parse_result append_buffer(const void* contents, size_t size, unsigned int options = parse_default, xml_encoding encoding = encoding_auto); + + // Find attribute using predicate. Returns first attribute for which predicate returned true. + template xml_attribute find_attribute(Predicate pred) const + { + if (!_root) return xml_attribute(); + + for (xml_attribute attrib = first_attribute(); attrib; attrib = attrib.next_attribute()) + if (pred(attrib)) + return attrib; + + return xml_attribute(); + } + + // Find child node using predicate. Returns first child for which predicate returned true. + template xml_node find_child(Predicate pred) const + { + if (!_root) return xml_node(); + + for (xml_node node = first_child(); node; node = node.next_sibling()) + if (pred(node)) + return node; + + return xml_node(); + } + + // Find node from subtree using predicate. Returns first node from subtree (depth-first), for which predicate returned true. 
+ template xml_node find_node(Predicate pred) const + { + if (!_root) return xml_node(); + + xml_node cur = first_child(); + + while (cur._root && cur._root != _root) + { + if (pred(cur)) return cur; + + if (cur.first_child()) cur = cur.first_child(); + else if (cur.next_sibling()) cur = cur.next_sibling(); + else + { + while (!cur.next_sibling() && cur._root != _root) cur = cur.parent(); + + if (cur._root != _root) cur = cur.next_sibling(); + } + } + + return xml_node(); + } + + // Find child node by attribute name/value + xml_node find_child_by_attribute(const char_t* name, const char_t* attr_name, const char_t* attr_value) const; + xml_node find_child_by_attribute(const char_t* attr_name, const char_t* attr_value) const; + + #ifndef PUGIXML_NO_STL + // Get the absolute node path from root as a text string. + string_t path(char_t delimiter = '/') const; + #endif + + // Search for a node by path consisting of node names and . or .. elements. + xml_node first_element_by_path(const char_t* path, char_t delimiter = '/') const; + + // Recursively traverse subtree with xml_tree_walker + bool traverse(xml_tree_walker& walker); + + #ifndef PUGIXML_NO_XPATH + // Select single node by evaluating XPath query. Returns first node from the resulting node set. + xpath_node select_node(const char_t* query, xpath_variable_set* variables = PUGIXML_NULL) const; + xpath_node select_node(const xpath_query& query) const; + + // Select node set by evaluating XPath query + xpath_node_set select_nodes(const char_t* query, xpath_variable_set* variables = PUGIXML_NULL) const; + xpath_node_set select_nodes(const xpath_query& query) const; + + // (deprecated: use select_node instead) Select single node by evaluating XPath query. 
+ PUGIXML_DEPRECATED xpath_node select_single_node(const char_t* query, xpath_variable_set* variables = PUGIXML_NULL) const; + PUGIXML_DEPRECATED xpath_node select_single_node(const xpath_query& query) const; + + #endif + + // Print subtree using a writer object + void print(xml_writer& writer, const char_t* indent = PUGIXML_TEXT("\t"), unsigned int flags = format_default, xml_encoding encoding = encoding_auto, unsigned int depth = 0) const; + + #ifndef PUGIXML_NO_STL + // Print subtree to stream + void print(std::basic_ostream >& os, const char_t* indent = PUGIXML_TEXT("\t"), unsigned int flags = format_default, xml_encoding encoding = encoding_auto, unsigned int depth = 0) const; + void print(std::basic_ostream >& os, const char_t* indent = PUGIXML_TEXT("\t"), unsigned int flags = format_default, unsigned int depth = 0) const; + #endif + + // Child nodes iterators + typedef xml_node_iterator iterator; + + iterator begin() const; + iterator end() const; + + // Attribute iterators + typedef xml_attribute_iterator attribute_iterator; + + attribute_iterator attributes_begin() const; + attribute_iterator attributes_end() const; + + // Range-based for support + xml_object_range children() const; + xml_object_range children(const char_t* name) const; + xml_object_range attributes() const; + + // Get node offset in parsed file/string (in char_t units) for debugging purposes + ptrdiff_t offset_debug() const; + + // Get hash value (unique for handles to the same object) + size_t hash_value() const; + + // Get internal pointer + xml_node_struct* internal_object() const; + }; + +#ifdef __BORLANDC__ + // Borland C++ workaround + bool PUGIXML_FUNCTION operator&&(const xml_node& lhs, bool rhs); + bool PUGIXML_FUNCTION operator||(const xml_node& lhs, bool rhs); +#endif + + // A helper for working with text inside PCDATA nodes + class PUGIXML_CLASS xml_text + { + friend class xml_node; + + xml_node_struct* _root; + + typedef void (*unspecified_bool_type)(xml_text***); + + 
explicit xml_text(xml_node_struct* root); + + xml_node_struct* _data_new(); + xml_node_struct* _data() const; + + public: + // Default constructor. Constructs an empty object. + xml_text(); + + // Safe bool conversion operator + operator unspecified_bool_type() const; + + // Borland C++ workaround + bool operator!() const; + + // Check if text object is empty + bool empty() const; + + // Get text, or "" if object is empty + const char_t* get() const; + + // Get text, or the default value if object is empty + const char_t* as_string(const char_t* def = PUGIXML_TEXT("")) const; + + // Get text as a number, or the default value if conversion did not succeed or object is empty + int as_int(int def = 0) const; + unsigned int as_uint(unsigned int def = 0) const; + double as_double(double def = 0) const; + float as_float(float def = 0) const; + + #ifdef PUGIXML_HAS_LONG_LONG + long long as_llong(long long def = 0) const; + unsigned long long as_ullong(unsigned long long def = 0) const; + #endif + + // Get text as bool (returns true if first character is in '1tTyY' set), or the default value if object is empty + bool as_bool(bool def = false) const; + + // Set text (returns false if object is empty or there is not enough memory) + bool set(const char_t* rhs); + + // Set text with type conversion (numbers are converted to strings, boolean is converted to "true"/"false") + bool set(int rhs); + bool set(unsigned int rhs); + bool set(long rhs); + bool set(unsigned long rhs); + bool set(double rhs); + bool set(double rhs, int precision); + bool set(float rhs); + bool set(float rhs, int precision); + bool set(bool rhs); + + #ifdef PUGIXML_HAS_LONG_LONG + bool set(long long rhs); + bool set(unsigned long long rhs); + #endif + + // Set text (equivalent to set without error checking) + xml_text& operator=(const char_t* rhs); + xml_text& operator=(int rhs); + xml_text& operator=(unsigned int rhs); + xml_text& operator=(long rhs); + xml_text& operator=(unsigned long rhs); + xml_text& 
operator=(double rhs); + xml_text& operator=(float rhs); + xml_text& operator=(bool rhs); + + #ifdef PUGIXML_HAS_LONG_LONG + xml_text& operator=(long long rhs); + xml_text& operator=(unsigned long long rhs); + #endif + + // Get the data node (node_pcdata or node_cdata) for this object + xml_node data() const; + }; + +#ifdef __BORLANDC__ + // Borland C++ workaround + bool PUGIXML_FUNCTION operator&&(const xml_text& lhs, bool rhs); + bool PUGIXML_FUNCTION operator||(const xml_text& lhs, bool rhs); +#endif + + // Child node iterator (a bidirectional iterator over a collection of xml_node) + class PUGIXML_CLASS xml_node_iterator + { + friend class xml_node; + + private: + mutable xml_node _wrap; + xml_node _parent; + + xml_node_iterator(xml_node_struct* ref, xml_node_struct* parent); + + public: + // Iterator traits + typedef ptrdiff_t difference_type; + typedef xml_node value_type; + typedef xml_node* pointer; + typedef xml_node& reference; + + #ifndef PUGIXML_NO_STL + typedef std::bidirectional_iterator_tag iterator_category; + #endif + + // Default constructor + xml_node_iterator(); + + // Construct an iterator which points to the specified node + xml_node_iterator(const xml_node& node); + + // Iterator operators + bool operator==(const xml_node_iterator& rhs) const; + bool operator!=(const xml_node_iterator& rhs) const; + + xml_node& operator*() const; + xml_node* operator->() const; + + xml_node_iterator& operator++(); + xml_node_iterator operator++(int); + + xml_node_iterator& operator--(); + xml_node_iterator operator--(int); + }; + + // Attribute iterator (a bidirectional iterator over a collection of xml_attribute) + class PUGIXML_CLASS xml_attribute_iterator + { + friend class xml_node; + + private: + mutable xml_attribute _wrap; + xml_node _parent; + + xml_attribute_iterator(xml_attribute_struct* ref, xml_node_struct* parent); + + public: + // Iterator traits + typedef ptrdiff_t difference_type; + typedef xml_attribute value_type; + typedef xml_attribute* 
pointer; + typedef xml_attribute& reference; + + #ifndef PUGIXML_NO_STL + typedef std::bidirectional_iterator_tag iterator_category; + #endif + + // Default constructor + xml_attribute_iterator(); + + // Construct an iterator which points to the specified attribute + xml_attribute_iterator(const xml_attribute& attr, const xml_node& parent); + + // Iterator operators + bool operator==(const xml_attribute_iterator& rhs) const; + bool operator!=(const xml_attribute_iterator& rhs) const; + + xml_attribute& operator*() const; + xml_attribute* operator->() const; + + xml_attribute_iterator& operator++(); + xml_attribute_iterator operator++(int); + + xml_attribute_iterator& operator--(); + xml_attribute_iterator operator--(int); + }; + + // Named node range helper + class PUGIXML_CLASS xml_named_node_iterator + { + friend class xml_node; + + public: + // Iterator traits + typedef ptrdiff_t difference_type; + typedef xml_node value_type; + typedef xml_node* pointer; + typedef xml_node& reference; + + #ifndef PUGIXML_NO_STL + typedef std::bidirectional_iterator_tag iterator_category; + #endif + + // Default constructor + xml_named_node_iterator(); + + // Construct an iterator which points to the specified node + xml_named_node_iterator(const xml_node& node, const char_t* name); + + // Iterator operators + bool operator==(const xml_named_node_iterator& rhs) const; + bool operator!=(const xml_named_node_iterator& rhs) const; + + xml_node& operator*() const; + xml_node* operator->() const; + + xml_named_node_iterator& operator++(); + xml_named_node_iterator operator++(int); + + xml_named_node_iterator& operator--(); + xml_named_node_iterator operator--(int); + + private: + mutable xml_node _wrap; + xml_node _parent; + const char_t* _name; + + xml_named_node_iterator(xml_node_struct* ref, xml_node_struct* parent, const char_t* name); + }; + + // Abstract tree walker class (see xml_node::traverse) + class PUGIXML_CLASS xml_tree_walker + { + friend class xml_node; + + private: + 
int _depth; + + protected: + // Get current traversal depth + int depth() const; + + public: + xml_tree_walker(); + virtual ~xml_tree_walker(); + + // Callback that is called when traversal begins + virtual bool begin(xml_node& node); + + // Callback that is called for each node traversed + virtual bool for_each(xml_node& node) = 0; + + // Callback that is called when traversal ends + virtual bool end(xml_node& node); + }; + + // Parsing status, returned as part of xml_parse_result object + enum xml_parse_status + { + status_ok = 0, // No error + + status_file_not_found, // File was not found during load_file() + status_io_error, // Error reading from file/stream + status_out_of_memory, // Could not allocate memory + status_internal_error, // Internal error occurred + + status_unrecognized_tag, // Parser could not determine tag type + + status_bad_pi, // Parsing error occurred while parsing document declaration/processing instruction + status_bad_comment, // Parsing error occurred while parsing comment + status_bad_cdata, // Parsing error occurred while parsing CDATA section + status_bad_doctype, // Parsing error occurred while parsing document type declaration + status_bad_pcdata, // Parsing error occurred while parsing PCDATA section + status_bad_start_element, // Parsing error occurred while parsing start element tag + status_bad_attribute, // Parsing error occurred while parsing element attribute + status_bad_end_element, // Parsing error occurred while parsing end element tag + status_end_element_mismatch,// There was a mismatch of start-end tags (closing tag had incorrect name, some tag was not closed or there was an excessive closing tag) + + status_append_invalid_root, // Unable to append nodes since root type is not node_element or node_document (exclusive to xml_node::append_buffer) + + status_no_document_element // Parsing resulted in a document without element nodes + }; + + // Parsing result + struct PUGIXML_CLASS xml_parse_result + { + // Parsing 
status (see xml_parse_status) + xml_parse_status status; + + // Last parsed offset (in char_t units from start of input data) + ptrdiff_t offset; + + // Source document encoding + xml_encoding encoding; + + // Default constructor, initializes object to failed state + xml_parse_result(); + + // Cast to bool operator + operator bool() const; + + // Get error description + const char* description() const; + }; + + // Document class (DOM tree root) + class PUGIXML_CLASS xml_document: public xml_node + { + private: + char_t* _buffer; + + char _memory[192]; + + // Non-copyable semantics + xml_document(const xml_document&); + xml_document& operator=(const xml_document&); + + void _create(); + void _destroy(); + void _move(xml_document& rhs) PUGIXML_NOEXCEPT_IF_NOT_COMPACT; + + public: + // Default constructor, makes empty document + xml_document(); + + // Destructor, invalidates all node/attribute handles to this document + ~xml_document(); + + #ifdef PUGIXML_HAS_MOVE + // Move semantics support + xml_document(xml_document&& rhs) PUGIXML_NOEXCEPT_IF_NOT_COMPACT; + xml_document& operator=(xml_document&& rhs) PUGIXML_NOEXCEPT_IF_NOT_COMPACT; + #endif + + // Removes all nodes, leaving the empty document + void reset(); + + // Removes all nodes, then copies the entire contents of the specified document + void reset(const xml_document& proto); + + #ifndef PUGIXML_NO_STL + // Load document from stream. + xml_parse_result load(std::basic_istream >& stream, unsigned int options = parse_default, xml_encoding encoding = encoding_auto); + xml_parse_result load(std::basic_istream >& stream, unsigned int options = parse_default); + #endif + + // (deprecated: use load_string instead) Load document from zero-terminated string. No encoding conversions are applied. + PUGIXML_DEPRECATED xml_parse_result load(const char_t* contents, unsigned int options = parse_default); + + // Load document from zero-terminated string. No encoding conversions are applied. 
+ xml_parse_result load_string(const char_t* contents, unsigned int options = parse_default); + + // Load document from file + xml_parse_result load_file(const char* path, unsigned int options = parse_default, xml_encoding encoding = encoding_auto); + xml_parse_result load_file(const wchar_t* path, unsigned int options = parse_default, xml_encoding encoding = encoding_auto); + + // Load document from buffer. Copies/converts the buffer, so it may be deleted or changed after the function returns. + xml_parse_result load_buffer(const void* contents, size_t size, unsigned int options = parse_default, xml_encoding encoding = encoding_auto); + + // Load document from buffer, using the buffer for in-place parsing (the buffer is modified and used for storage of document data). + // You should ensure that buffer data will persist throughout the document's lifetime, and free the buffer memory manually once document is destroyed. + xml_parse_result load_buffer_inplace(void* contents, size_t size, unsigned int options = parse_default, xml_encoding encoding = encoding_auto); + + // Load document from buffer, using the buffer for in-place parsing (the buffer is modified and used for storage of document data). + // You should allocate the buffer with pugixml allocation function; document will free the buffer when it is no longer needed (you can't use it anymore). + xml_parse_result load_buffer_inplace_own(void* contents, size_t size, unsigned int options = parse_default, xml_encoding encoding = encoding_auto); + + // Save XML document to writer (semantics is slightly different from xml_node::print, see documentation for details). + void save(xml_writer& writer, const char_t* indent = PUGIXML_TEXT("\t"), unsigned int flags = format_default, xml_encoding encoding = encoding_auto) const; + + #ifndef PUGIXML_NO_STL + // Save XML document to stream (semantics is slightly different from xml_node::print, see documentation for details). 
+ void save(std::basic_ostream >& stream, const char_t* indent = PUGIXML_TEXT("\t"), unsigned int flags = format_default, xml_encoding encoding = encoding_auto) const; + void save(std::basic_ostream >& stream, const char_t* indent = PUGIXML_TEXT("\t"), unsigned int flags = format_default) const; + #endif + + // Save XML to file + bool save_file(const char* path, const char_t* indent = PUGIXML_TEXT("\t"), unsigned int flags = format_default, xml_encoding encoding = encoding_auto) const; + bool save_file(const wchar_t* path, const char_t* indent = PUGIXML_TEXT("\t"), unsigned int flags = format_default, xml_encoding encoding = encoding_auto) const; + + // Get document element + xml_node document_element() const; + }; + +#ifndef PUGIXML_NO_XPATH + // XPath query return type + enum xpath_value_type + { + xpath_type_none, // Unknown type (query failed to compile) + xpath_type_node_set, // Node set (xpath_node_set) + xpath_type_number, // Number + xpath_type_string, // String + xpath_type_boolean // Boolean + }; + + // XPath parsing result + struct PUGIXML_CLASS xpath_parse_result + { + // Error message (0 if no error) + const char* error; + + // Last parsed offset (in char_t units from string start) + ptrdiff_t offset; + + // Default constructor, initializes object to failed state + xpath_parse_result(); + + // Cast to bool operator + operator bool() const; + + // Get error description + const char* description() const; + }; + + // A single XPath variable + class PUGIXML_CLASS xpath_variable + { + friend class xpath_variable_set; + + protected: + xpath_value_type _type; + xpath_variable* _next; + + xpath_variable(xpath_value_type type); + + // Non-copyable semantics + xpath_variable(const xpath_variable&); + xpath_variable& operator=(const xpath_variable&); + + public: + // Get variable name + const char_t* name() const; + + // Get variable type + xpath_value_type type() const; + + // Get variable value; no type conversion is performed, default value (false, NaN, empty 
string, empty node set) is returned on type mismatch error + bool get_boolean() const; + double get_number() const; + const char_t* get_string() const; + const xpath_node_set& get_node_set() const; + + // Set variable value; no type conversion is performed, false is returned on type mismatch error + bool set(bool value); + bool set(double value); + bool set(const char_t* value); + bool set(const xpath_node_set& value); + }; + + // A set of XPath variables + class PUGIXML_CLASS xpath_variable_set + { + private: + xpath_variable* _data[64]; + + void _assign(const xpath_variable_set& rhs); + void _swap(xpath_variable_set& rhs); + + xpath_variable* _find(const char_t* name) const; + + static bool _clone(xpath_variable* var, xpath_variable** out_result); + static void _destroy(xpath_variable* var); + + public: + // Default constructor/destructor + xpath_variable_set(); + ~xpath_variable_set(); + + // Copy constructor/assignment operator + xpath_variable_set(const xpath_variable_set& rhs); + xpath_variable_set& operator=(const xpath_variable_set& rhs); + + #ifdef PUGIXML_HAS_MOVE + // Move semantics support + xpath_variable_set(xpath_variable_set&& rhs) PUGIXML_NOEXCEPT; + xpath_variable_set& operator=(xpath_variable_set&& rhs) PUGIXML_NOEXCEPT; + #endif + + // Add a new variable or get the existing one, if the types match + xpath_variable* add(const char_t* name, xpath_value_type type); + + // Set value of an existing variable; no type conversion is performed, false is returned if there is no such variable or if types mismatch + bool set(const char_t* name, bool value); + bool set(const char_t* name, double value); + bool set(const char_t* name, const char_t* value); + bool set(const char_t* name, const xpath_node_set& value); + + // Get existing variable by name + xpath_variable* get(const char_t* name); + const xpath_variable* get(const char_t* name) const; + }; + + // A compiled XPath query object + class PUGIXML_CLASS xpath_query + { + private: + void* _impl; + 
xpath_parse_result _result; + + typedef void (*unspecified_bool_type)(xpath_query***); + + // Non-copyable semantics + xpath_query(const xpath_query&); + xpath_query& operator=(const xpath_query&); + + public: + // Construct a compiled object from XPath expression. + // If PUGIXML_NO_EXCEPTIONS is not defined, throws xpath_exception on compilation errors. + explicit xpath_query(const char_t* query, xpath_variable_set* variables = PUGIXML_NULL); + + // Constructor + xpath_query(); + + // Destructor + ~xpath_query(); + + #ifdef PUGIXML_HAS_MOVE + // Move semantics support + xpath_query(xpath_query&& rhs) PUGIXML_NOEXCEPT; + xpath_query& operator=(xpath_query&& rhs) PUGIXML_NOEXCEPT; + #endif + + // Get query expression return type + xpath_value_type return_type() const; + + // Evaluate expression as boolean value in the specified context; performs type conversion if necessary. + // If PUGIXML_NO_EXCEPTIONS is not defined, throws std::bad_alloc on out of memory errors. + bool evaluate_boolean(const xpath_node& n) const; + + // Evaluate expression as double value in the specified context; performs type conversion if necessary. + // If PUGIXML_NO_EXCEPTIONS is not defined, throws std::bad_alloc on out of memory errors. + double evaluate_number(const xpath_node& n) const; + + #ifndef PUGIXML_NO_STL + // Evaluate expression as string value in the specified context; performs type conversion if necessary. + // If PUGIXML_NO_EXCEPTIONS is not defined, throws std::bad_alloc on out of memory errors. + string_t evaluate_string(const xpath_node& n) const; + #endif + + // Evaluate expression as string value in the specified context; performs type conversion if necessary. + // At most capacity characters are written to the destination buffer, full result size is returned (includes terminating zero). + // If PUGIXML_NO_EXCEPTIONS is not defined, throws std::bad_alloc on out of memory errors. + // If PUGIXML_NO_EXCEPTIONS is defined, returns empty set instead. 
+ size_t evaluate_string(char_t* buffer, size_t capacity, const xpath_node& n) const; + + // Evaluate expression as node set in the specified context. + // If PUGIXML_NO_EXCEPTIONS is not defined, throws xpath_exception on type mismatch and std::bad_alloc on out of memory errors. + // If PUGIXML_NO_EXCEPTIONS is defined, returns empty node set instead. + xpath_node_set evaluate_node_set(const xpath_node& n) const; + + // Evaluate expression as node set in the specified context. + // Return first node in document order, or empty node if node set is empty. + // If PUGIXML_NO_EXCEPTIONS is not defined, throws xpath_exception on type mismatch and std::bad_alloc on out of memory errors. + // If PUGIXML_NO_EXCEPTIONS is defined, returns empty node instead. + xpath_node evaluate_node(const xpath_node& n) const; + + // Get parsing result (used to get compilation errors in PUGIXML_NO_EXCEPTIONS mode) + const xpath_parse_result& result() const; + + // Safe bool conversion operator + operator unspecified_bool_type() const; + + // Borland C++ workaround + bool operator!() const; + }; + + #ifndef PUGIXML_NO_EXCEPTIONS + #if defined(_MSC_VER) + // C4275 can be ignored in Visual C++ if you are deriving + // from a type in the Standard C++ Library + #pragma warning(push) + #pragma warning(disable: 4275) + #endif + // XPath exception class + class PUGIXML_CLASS xpath_exception: public std::exception + { + private: + xpath_parse_result _result; + + public: + // Construct exception from parse result + explicit xpath_exception(const xpath_parse_result& result); + + // Get error message + virtual const char* what() const throw() PUGIXML_OVERRIDE; + + // Get parse result + const xpath_parse_result& result() const; + }; + #if defined(_MSC_VER) + #pragma warning(pop) + #endif + #endif + + // XPath node class (either xml_node or xml_attribute) + class PUGIXML_CLASS xpath_node + { + private: + xml_node _node; + xml_attribute _attribute; + + typedef void 
(*unspecified_bool_type)(xpath_node***); + + public: + // Default constructor; constructs empty XPath node + xpath_node(); + + // Construct XPath node from XML node/attribute + xpath_node(const xml_node& node); + xpath_node(const xml_attribute& attribute, const xml_node& parent); + + // Get node/attribute, if any + xml_node node() const; + xml_attribute attribute() const; + + // Get parent of contained node/attribute + xml_node parent() const; + + // Safe bool conversion operator + operator unspecified_bool_type() const; + + // Borland C++ workaround + bool operator!() const; + + // Comparison operators + bool operator==(const xpath_node& n) const; + bool operator!=(const xpath_node& n) const; + }; + +#ifdef __BORLANDC__ + // Borland C++ workaround + bool PUGIXML_FUNCTION operator&&(const xpath_node& lhs, bool rhs); + bool PUGIXML_FUNCTION operator||(const xpath_node& lhs, bool rhs); +#endif + + // A fixed-size collection of XPath nodes + class PUGIXML_CLASS xpath_node_set + { + public: + // Collection type + enum type_t + { + type_unsorted, // Not ordered + type_sorted, // Sorted by document order (ascending) + type_sorted_reverse // Sorted by document order (descending) + }; + + // Constant iterator type + typedef const xpath_node* const_iterator; + + // We define non-constant iterator to be the same as constant iterator so that various generic algorithms (i.e. boost foreach) work + typedef const xpath_node* iterator; + + // Default constructor. Constructs empty set. 
+ xpath_node_set(); + + // Constructs a set from iterator range; data is not checked for duplicates and is not sorted according to provided type, so be careful + xpath_node_set(const_iterator begin, const_iterator end, type_t type = type_unsorted); + + // Destructor + ~xpath_node_set(); + + // Copy constructor/assignment operator + xpath_node_set(const xpath_node_set& ns); + xpath_node_set& operator=(const xpath_node_set& ns); + + #ifdef PUGIXML_HAS_MOVE + // Move semantics support + xpath_node_set(xpath_node_set&& rhs) PUGIXML_NOEXCEPT; + xpath_node_set& operator=(xpath_node_set&& rhs) PUGIXML_NOEXCEPT; + #endif + + // Get collection type + type_t type() const; + + // Get collection size + size_t size() const; + + // Indexing operator + const xpath_node& operator[](size_t index) const; + + // Collection iterators + const_iterator begin() const; + const_iterator end() const; + + // Sort the collection in ascending/descending order by document order + void sort(bool reverse = false); + + // Get first node in the collection by document order + xpath_node first() const; + + // Check if collection is empty + bool empty() const; + + private: + type_t _type; + + xpath_node _storage[1]; + + xpath_node* _begin; + xpath_node* _end; + + void _assign(const_iterator begin, const_iterator end, type_t type); + void _move(xpath_node_set& rhs) PUGIXML_NOEXCEPT; + }; +#endif + +#ifndef PUGIXML_NO_STL + // Convert wide string to UTF8 + std::basic_string, std::allocator > PUGIXML_FUNCTION as_utf8(const wchar_t* str); + std::basic_string, std::allocator > PUGIXML_FUNCTION as_utf8(const std::basic_string, std::allocator >& str); + + // Convert UTF8 to wide string + std::basic_string, std::allocator > PUGIXML_FUNCTION as_wide(const char* str); + std::basic_string, std::allocator > PUGIXML_FUNCTION as_wide(const std::basic_string, std::allocator >& str); +#endif + + // Memory allocation function interface; returns pointer to allocated memory or NULL on failure + typedef void* 
(*allocation_function)(size_t size); + + // Memory deallocation function interface + typedef void (*deallocation_function)(void* ptr); + + // Override default memory management functions. All subsequent allocations/deallocations will be performed via supplied functions. + void PUGIXML_FUNCTION set_memory_management_functions(allocation_function allocate, deallocation_function deallocate); + + // Get current memory management functions + allocation_function PUGIXML_FUNCTION get_memory_allocation_function(); + deallocation_function PUGIXML_FUNCTION get_memory_deallocation_function(); +} + +#if !defined(PUGIXML_NO_STL) && (defined(_MSC_VER) || defined(__ICC)) +namespace std +{ + // Workarounds for (non-standard) iterator category detection for older versions (MSVC7/IC8 and earlier) + std::bidirectional_iterator_tag PUGIXML_FUNCTION _Iter_cat(const pugi::xml_node_iterator&); + std::bidirectional_iterator_tag PUGIXML_FUNCTION _Iter_cat(const pugi::xml_attribute_iterator&); + std::bidirectional_iterator_tag PUGIXML_FUNCTION _Iter_cat(const pugi::xml_named_node_iterator&); +} +#endif + +#if !defined(PUGIXML_NO_STL) && defined(__SUNPRO_CC) +namespace std +{ + // Workarounds for (non-standard) iterator category detection + std::bidirectional_iterator_tag PUGIXML_FUNCTION __iterator_category(const pugi::xml_node_iterator&); + std::bidirectional_iterator_tag PUGIXML_FUNCTION __iterator_category(const pugi::xml_attribute_iterator&); + std::bidirectional_iterator_tag PUGIXML_FUNCTION __iterator_category(const pugi::xml_named_node_iterator&); +} +#endif + +#endif + +// Make sure implementation is included in header-only mode +// Use macro expansion in #include to work around QMake (QTBUG-11923) +#if defined(PUGIXML_HEADER_ONLY) && !defined(PUGIXML_SOURCE) +# define PUGIXML_SOURCE "pugixml.cpp" +# include PUGIXML_SOURCE +#endif + +/** + * Copyright (c) 2006-2022 Arseny Kapoulkine + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this 
software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ diff --git a/third_party/snowpack/snowpackCore/CMakeLists.txt b/third_party/snowpack/snowpackCore/CMakeLists.txt index 746ff33b..a7f7b046 100644 --- a/third_party/snowpack/snowpackCore/CMakeLists.txt +++ b/third_party/snowpack/snowpackCore/CMakeLists.txt @@ -5,9 +5,12 @@ SET(core_sources snowpackCore/Snowpack.cc snowpackCore/Aggregate.cc snowpackCore/WaterTransport.cc + snowpackCore/VapourTransport.cc snowpackCore/ReSolver1d.cc snowpackCore/Canopy.cc snowpackCore/Metamorphism.cc snowpackCore/PhaseChange.cc + snowpackCore/SeaIce.cc snowpackCore/Solver.cc + snowpackCore/SalinityTransport.cc ) diff --git a/third_party/snowpack/snowpackCore/Canopy.cc b/third_party/snowpack/snowpackCore/Canopy.cc index e6d16d2b..c38fc31b 100644 --- a/third_party/snowpack/snowpackCore/Canopy.cc +++ b/third_party/snowpack/snowpackCore/Canopy.cc @@ -1,4 +1,4 @@ -/* +/* * SNOWPACK stand-alone * * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND @@ -20,8 +20,8 @@ #include "Canopy.h" #include "../Constants.h" -#include "../Laws_sn.h" #include "../Utils.h" +#include "../Laws_sn.h" #include #include @@ -34,84 +34,83 @@ using namespace mio; ************************************************************/ /** - * @name CANOPY PARAMETERS - * @brief History of changed values: - * - 2007-12-20: update based on data from all SnowMIP2 sites, and calibration using Alptal data + * @brief Write header for 28 canopy parameters to Outfile, columns 65-92 + * @param fout Dump file stream */ -//@{ +void Canopy::DumpCanopyHeader(std::ofstream &fout) +{ + // 28 canopy fields (27 assigned, 1 empty at the end) -/// @brief INTERCEPTION -/** - * @brief Specific interception capacity for snow (i_LAI) (mm/LAI) \n - * Please note that this parameter is further multiplied with (0.27+46/new_snow_density[Ta]) following (Pomeroy et al, Hydr. Proc. 
1998) - * - 5.9 Spruce and 6.6 Pine (Schmidt&Glums,CanJForRes,1991) - */ -const double Canopy::int_cap_snow = 5.9; -/// Specific interception capacity for rain (I_LAI) (mm/LAI) -const double Canopy::int_cap_rain = 0.3; -/** Coef in interception function, see (Pomeroy et al,1998) where a value of 0.7 was - * found to be appropriate for hourly time-step, but smaller time steps require smaller - * values, 0.5 was found reasoanble by using the SnowMIP2 data (2007-12-09) - */ -const double Canopy::interception_timecoef = 0.5; - -/// @brief RADIATION BALANCE -const double Canopy::can_alb_dry = 0.11; // Albedo of dry canopy (calibr: 0.09, Alptal) -const double Canopy::can_alb_wet = 0.11; // Albedo of wet canopy (calibr: 0.09, Alptal) -const double Canopy::can_alb_snow = 0.35; // Albedo of snow covered albedo (calibr: 0.35, Alptal) -const double Canopy::krnt_lai = .75; // Radiation transmissivity parameter, in the range 0.4-0.8 if the true LAI is used; higher if optical LAI is used. - // (calibrated on Alptal) -const double Canopy::can_diameter = 1.0; // average canopy (tree) diameter [m], parameter in the new radiation transfer model - -/// @brief ENERGY BALANCE -/// parameters for HeatMass and 2layercanopy -const double Canopy:: biomass_heat_capacity = 2800. ; // from Linroth et al., 2013 (J Kg-1 K-1) -const double Canopy:: biomass_density = 900. ; // from Linroth et al., 2013 (Kg m-3) -const double Canopy:: lai_frac_top_default = 0.5 ; // fraction of total LAI that is attributed to the uppermost layer. Here calibrated for Alptal. -const double Canopy:: trunk_frac_height = 0.2 ; // (optional) fraction of total tree height occupied by trunks, - // used to calculate direct solar insolation of trunks. -const double Canopy:: trunkalb = 0.09 ; // trunk albedo -const double Canopy:: et = 1. ; // trunk emissivity - -/// @brief TURBULENT HEAT EXCHANGE -/// @brief Stab. corr. aerodyn. resist. 
above and below canopy: 0=off and 1=on (Monin-Obukhov formulation) -const bool Canopy::canopy_stabilitycorrection = true; -/// @brief Ratio between canopy height and roughness length -const double Canopy::roughmom_to_canopyheight_ratio = 0.10; -/// @brief As above for displacement height -const double Canopy::displ_to_canopyheight_ratio = 0.6667; + // PRIMARY "STATE" VARIABLES + fout << ",Interception storage"; + fout << ",Canopy surface temperature"; -/** - * @brief Fractional increase of aerodynamic resistance for evaporation of intercepted snow. - * - 10.0 from Koivusalo and Kokkonen (2002) - * - 8.0 calibration with Alptal data - */ -const double Canopy::raincrease_snow = 10.0; - -/// @brief Maximum allowed canopy temperature change (K hr-1) -const double Canopy::canopytemp_maxchange_perhour = 7.0; -/// @brief (~=1, but Not allowed to be exactly 1) -const double Canopy::roughheat_to_roughmom_ratio = 0.9999; -/// @brief minimum heat exchange (Wm-2K-1) at zero wind -const double Canopy::can_ch0 = 3.; -/// @brief 1+CAN_RS_MULT = maximum factor to increase Cdata->rs below canopy -const double Canopy::can_rs_mult = 3.0; -/// @brief TRANSPIRATION -/// @brief Minimum canopy surface resistance, 500 (sm-1) is for needle leaf treas van den Hurk et al (2000) *75% Gustafsson et al (2003) -const double Canopy::rsmin = 375.0; + // SECONDARY "STATE" VARIABLES + fout << ",Canopy albedo"; + fout << ",Wet fraction"; + fout << ",Interception capacity"; + + // RADIATIVE FLUXES (W m-2) + fout << ",Net shortwave radiation absorbed by canopy"; + fout << ",Net longwave radiation absorbed by canopy"; + fout << ",Net radiation to canopy"; + + // HEAT FLUXES CANOPY (W m-2) + fout << ",Sensible heat flux to canopy"; + fout << ",Latent heat flux to canopy"; + fout << ",Biomass heat storage flux towards Canopy"; + + // WATER FLUXES CANOPY (kg m-2) + fout << ",Transpiration of the canopy"; + fout << ",Evaporation and sublimation of interception (liquid and frozen)"; + fout << ",Interception 
rate"; + fout << ",Throughfall"; + fout << ",Snow unload"; + + // TOTAL SURFACE FLUXES,EVAPORATION; ETC + fout << ",Longwave radiation up above canopy"; + fout << ",Longwave radiation down above canopy"; + fout << ",Shortwave radiation up above canopy"; + fout << ",Shortwave radiation down above canopy"; + fout << ",Total land surface albedo"; + fout << ",Total net radiation to the surface (ground + canopy)"; + fout << ",Surface radiative temperature (ground + canopy)"; + fout << ",Forest floor albedo"; + fout << ",Snowfall rate Above Canopy"; + fout << ",Rainfall rate Above Canopy"; + fout << ",Evapotranspiration of the total surface (ground + canopy)"; + fout << ","; //Note: 1 empty field here! + return; +} /** - * @brief gd (Pa-1) parameter for canopy surface resistance response to vapour pressure: - * - 0.0003 = trees (needle or broadleafs) - * - 0=crops, grass, tundra etc + * @brief Write units for 28 canopy parameters to Outfile, columns 65-92 + * @param fout Dump file stream */ -const double Canopy::f3_gd = 0.0003; -/// @brief Root depth, determining the soil layers influenced by root water uptake -const double Canopy::rootdepth = 1.0; -/// @brief Wilting point, defined as a fraction of water content at field capacity (-) -const double Canopy::wp_fraction = 0.17; -//@} +void Canopy::DumpCanopyUnits(std::ofstream &fout) +{ + // 28 canopy fields (27 assigned, 1 empty at the end) + + // PRIMARY "STATE" VARIABLES + fout << ",kg m-2,degC"; + + // SECONDARY "STATE" VARIABLES + fout << ",-,-,kg m-2"; + // RADIATIVE FLUXES (W m-2) + fout << ",W m-2,W m-2,W m-2"; + + // HEAT FLUXES CANOPY (W m-2) + fout << ",W m-2,W m-2,W m-2"; + + // WATER FLUXES CANOPY (kg m-2) + fout << ",kg m-2 per timestep,kg m-2 per timestep,kg m-2 per timestep,kg m-2 per timestep,kg m-2 per timestep"; + + // TOTAL SURFACE FLUXES,EVAPORATION; ETC + fout << ",W m-2,W m-2,W m-2,W m-2,-,W m-2,degC,-,kg m-2 per timestep,kg m-2 per timestep,kg m-2 per timestep"; + fout << ","; //Note: 1 empty field 
here! + return; +} /** * @brief Dump 28 canopy parameters to Outfile, columns 65-92 @@ -123,52 +122,53 @@ const double Canopy::wp_fraction = 0.17; void Canopy::DumpCanopyData(std::ofstream &fout, const CanopyData *Cdata, const SurfaceFluxes *Sdata, const double cos_sl) { // PRIMARY "STATE" VARIABLES - fout << "," << Cdata->storage/cos_sl; // intercepted water (mm or kg m-2) - fout << "," << IOUtils::K_TO_C(Cdata->temp); // temperature (degC) + fout << "," << Cdata->storage/cos_sl; // intercepted water (mm or kg m-2) + fout << "," << IOUtils::K_TO_C(Cdata->temp); // temperature (degC) // SECONDARY "STATE" VARIABLES - fout << "," << Cdata->canopyalb; // albedo (1) - fout << "," << Cdata->wetfraction; // wet fraction - fout << "," << Cdata->intcapacity/cos_sl; // interception capacity (kg m-2) + fout << "," << Cdata->canopyalb; // albedo (1) + fout << "," << Cdata->wetfraction; // wet fraction + fout << "," << Cdata->intcapacity/cos_sl; // interception capacity (kg m-2) // RADIATIVE FLUXES (W m-2) - fout << "," << Cdata->rsnet; // net shortwave radiation to canopy - fout << "," << Cdata->rlnet; // net longwave radiation to canopy - fout << "," << Cdata->rsnet+Cdata->rlnet; // net radiation to canopy + fout << "," << Cdata->rsnet; // net shortwave radiation to canopy + fout << "," << Cdata->rlnet; // net longwave radiation to canopy + fout << "," << Cdata->rsnet+Cdata->rlnet; // net radiation to canopy // HEAT FLUXES CANOPY (W m-2) - fout << "," << -Cdata->sensible; // sensible heat flux to canopy (>0 towards canopy) - fout << "," << -Cdata->latentcorr; // latent heat flux to canopy (>0 towards canopy) - fout << "," << Cdata->CondFluxCanop, + fout << "," << -Cdata->sensible; // sensible heat flux to canopy (>0 towards canopy) + fout << "," << -Cdata->latentcorr; // latent heat flux to canopy (>0 towards canopy) + fout << "," << Cdata->CondFluxCanop; // biomass heat storage flux towards Canopy // WATER FLUXES CANOPY (kg m-2) - fout << "," << Cdata->transp/cos_sl; // 
transpiration - fout << "," << Cdata->intevap/cos_sl; // interception evaporation - fout << "," << Cdata->interception/cos_sl; // interception - fout << "," << Cdata->throughfall/cos_sl; // throughfall - fout << "," << Cdata->snowunload/cos_sl; // unload of snow + fout << "," << Cdata->transp/cos_sl; // transpiration + fout << "," << Cdata->intevap/cos_sl; // interception evaporation + fout << "," << Cdata->interception/cos_sl; // interception + fout << "," << Cdata->throughfall/cos_sl; // throughfall + fout << "," << Cdata->snowunload/cos_sl; // unload of snow // TOTAL SURFACE FLUXES,EVAPORATION; ETC - fout << "," << Cdata->rlwrac; // upward longwave radiation ABOVE canopy - fout << "," << Cdata->ilwrac; // downward longwave radiation ABOVE canopy - fout << "," << Cdata->rswrac; // upward shortwave above canopy - fout << "," << Cdata->iswrac; // downward shortwave radiation above canopy - fout << "," << Cdata->totalalb; // total albedo [-] + fout << "," << Cdata->rlwrac; // upward longwave radiation ABOVE canopy + fout << "," << Cdata->ilwrac; // downward longwave radiation ABOVE canopy + fout << "," << Cdata->rswrac; // upward shortwave above canopy + fout << "," << Cdata->iswrac; // downward shortwave radiation above canopy + fout << "," << Cdata->totalalb; // total albedo [-] fout << "," << Cdata->rlnet+Sdata->lw_net+Cdata->rsnet+Sdata->qw; // net radiation to the total surface fout << "," << IOUtils::K_TO_C(pow(Cdata->rlwrac/Constants::stefan_boltzmann, 0.25)); // surface (ground + canopy) temperature - fout << "," << Cdata->forestfloor_alb; // albedo of the forest floor [-] - fout << "," << Cdata->snowfac/cos_sl; // snowfall rate above canopy (mm per output timestep) - fout << "," << Cdata->rainfac/cos_sl; // rainfall rate above canopy (mm per output timestep) + fout << "," << Cdata->forestfloor_alb; // albedo of the forest floor [-] + fout << "," << Cdata->snowfac/cos_sl; // snowfall rate above canopy (mm per output timestep) + fout << "," << 
Cdata->rainfac/cos_sl; // rainfall rate above canopy (mm per output timestep) fout << "," << (Cdata->transp+Cdata->intevap-(Sdata->mass[SurfaceFluxes::MS_SUBLIMATION]+Sdata->mass[SurfaceFluxes::MS_EVAPORATION]))/cos_sl;// evapotranspiration of total surface (mm h-1) - fout << ","; // 1 empty field here + fout << ","; // 1 empty field here } +//TODO: this function is not yet integrated into AsciiIO, and now this function is actually never called by any routine. void Canopy::writeTimeSeriesAdd2LCanopy(std::ofstream &fout, const CanopyData *Cdata) { - fout << "," << IOUtils::K_TO_C(Cdata->Ttrunk); // Trunk temperature (degC) - fout << "," << Cdata->CondFluxTrunks; // Trunk biomass heat storage flux (W m-2) - fout << "," << Cdata->LWnet_Trunks; // net LW radiations to Trunk layer (W m-2) - fout << "," << Cdata->SWnet_Trunks; // net SW radiations to Trunk layer (W m-2) - fout << "," << -Cdata->QStrunks; // sensible heat flux to trunk layer (W m-2), (>0 towards trunks) + fout << "," << IOUtils::K_TO_C(Cdata->Ttrunk); // Trunk temperature (degC) + fout << "," << Cdata->CondFluxTrunks; // Trunk biomass heat storage flux (W m-2) + fout << "," << Cdata->LWnet_Trunks; // net LW radiations to Trunk layer (W m-2) + fout << "," << Cdata->SWnet_Trunks; // net SW radiations to Trunk layer (W m-2) + fout << "," << -Cdata->QStrunks; // sensible heat flux to trunk layer (W m-2), (>0 towards trunks) fout << ",,,"; } /****i******************************************************* @@ -177,7 +177,7 @@ void Canopy::writeTimeSeriesAdd2LCanopy(std::ofstream &fout, const CanopyData *C Canopy::Canopy(const SnowpackConfig& cfg) : hn_density(), hn_density_parameterization(), variant(), watertransportmodel_soil(), hn_density_fixedValue(Constants::undefined), calculation_step_length(0.), useSoilLayers(false), - CanopyHeatMass(true), Twolayercanopy(true), canopytransmission(true), forestfloor_alb(true) + CanopyHeatMass(true), Twolayercanopy(true), Twolayercanopy_user(true), canopytransmission(true), 
forestfloor_alb(true) { cfg.getValue("VARIANT", "SnowpackAdvanced", variant); cfg.getValue("SNP_SOIL", "Snowpack", useSoilLayers); @@ -188,7 +188,8 @@ Canopy::Canopy(const SnowpackConfig& cfg) cfg.getValue("WATERTRANSPORTMODEL_SOIL", "SnowpackAdvanced", watertransportmodel_soil); cfg.getValue("CANOPY_HEAT_MASS", "SnowpackAdvanced", CanopyHeatMass); cfg.getValue("CANOPY_TRANSMISSION", "SnowpackAdvanced", canopytransmission); - cfg.getValue("TWO_LAYER_CANOPY", "SnowpackAdvanced", Twolayercanopy); + cfg.getValue("TWO_LAYER_CANOPY", "SnowpackAdvanced", Twolayercanopy_user); + Twolayercanopy = Twolayercanopy_user; cfg.getValue("FORESTFLOOR_ALB", "SnowpackAdvanced", forestfloor_alb); } @@ -201,9 +202,9 @@ Canopy::Canopy(const SnowpackConfig& cfg) */ double Canopy::get_f1(const double& ris) { - const double a = 0.81; - const double b = 0.004; - const double c = 0.05; + static const double a = 0.81; + static const double b = 0.004; + static const double c = 0.05; const double f1 = ( a * ( 1.0 + b * ris ) ) / ( b * ris + c ); if (f1 < 1.0) { return 1.0; @@ -220,21 +221,21 @@ double Canopy::get_f1(const double& ris) * @param zlower * @return double */ -double Canopy::RootFraction(const double& zupper, const double& zlower) +double Canopy::RootFraction(const double& zupper, const double& zlower, const double rootdepth) { double rf = 0.0; - // Constants.h: Canopy::rootdepth, default 0.5 - if ( zupper < Canopy::rootdepth ) { - const double ar = 6.706; // evergreen needleleaf trees - const double br = 2.175; // evergreen needleleaf trees + // Constants.h: Xdata.Cdata.rootdepth, default 0.5 + if ( zupper < rootdepth ) { + static const double ar = 6.706; // evergreen needleleaf trees + static const double br = 2.175; // evergreen needleleaf trees // fraction of roots below root depth (according to exponential distribution) - const double tail = 0.5 * (exp(-ar * Canopy::rootdepth)+ exp(-br * Canopy::rootdepth)); + const double tail = 0.5 * (exp(-ar * rootdepth)+ exp(-br * 
rootdepth)); // multiplicative factor to distribute tail on layers above root depth rf = ( ( 1. + tail / ( 1. - tail ) ) * 0.5 * (exp(-ar * zupper) + exp(-br * zupper) - -exp(-ar * std::min(zlower, Canopy::rootdepth)) - -exp(-br * std::min(zlower, Canopy::rootdepth)))); + -exp(-ar * std::min(zlower, rootdepth)) + -exp(-br * std::min(zlower, rootdepth)))); } return (rf); @@ -255,7 +256,9 @@ double Canopy::RootFraction(const double& zupper, const double& zlower) * @param *EMS * @param transpiration */ -void Canopy::SoilWaterUptake(const size_t& SoilNode, const double& transpiration, ElementData* EMS) +void Canopy::SoilWaterUptake(const size_t& SoilNode, const double& transpiration, + ElementData* EMS, const double wp_fraction, + const double rootdepth, const double h_wilt) const { // transpiration [mm] if ( transpiration == 0. ) return; @@ -269,16 +272,24 @@ void Canopy::SoilWaterUptake(const size_t& SoilNode, const double& transpiration size_t RootLayer = SoilNode; for (size_t e = SoilNode; e --> 0; ) {//e gets decremented right away -> start at SoilNode // fraction of roots in layer - const double rootfr = RootFraction(zupper, zupper + EMS[e].L); + const double rootfr = RootFraction(zupper, zupper + EMS[e].L, rootdepth); const double water = transpiration; if (rootfr > 0.0 ){ // Index of last layer with roots RootLayer = e; // Change in volumetric water content in layer - const double d_theta_l = std::min( std::max(0., ( EMS[e].theta[WATER] - - Canopy::wp_fraction * EMS[e].soilFieldCapacity() )), - rootfr*water / ( Constants::density_water * EMS[e].L ) ); + double d_theta_l = 0.; + if (watertransportmodel_soil == "RICHARDSEQUATION" && EMS[e].VG.defined == true) { + const double theta_wilt = EMS[e].VG.fromHtoTHETAforICE(h_wilt, EMS[e].theta[ICE]); + d_theta_l = std::min( std::max(0., ( EMS[e].theta[WATER] - + theta_wilt )), + rootfr*water / ( Constants::density_water * EMS[e].L ) ); + } else { + d_theta_l = std::min( std::max(0., ( EMS[e].theta[WATER] - + 
wp_fraction * EMS[e].soilFieldCapacity() )), + rootfr*water / ( Constants::density_water * EMS[e].L ) ); + } // residual water to be extracted in layers below waterresidual -= rootfr * water; @@ -304,20 +315,25 @@ void Canopy::SoilWaterUptake(const size_t& SoilNode, const double& transpiration // modify by Moustapha if there is a problem . RootLayer -= 1; } - const double d_theta = std::min( std::max(0., ( EMS[RootLayer].theta[WATER] - - Canopy::wp_fraction * EMS[RootLayer].soilFieldCapacity() ) ), - waterresidual / ( Constants::density_water * EMS[RootLayer].L ) ); - if (watertransportmodel_soil == "RICHARDSEQUATION") { + if (watertransportmodel_soil == "RICHARDSEQUATION" && EMS[RootLayer].VG.defined == true) { // Transpiration is considered a source/sink term for Richards equation + const double theta_wilt = EMS[RootLayer].VG.fromHtoTHETAforICE(h_wilt, EMS[RootLayer].theta[ICE]); + const double d_theta = std::min( std::max(0., ( EMS[RootLayer].theta[WATER] - + theta_wilt ) ), + waterresidual_real / ( Constants::density_water * EMS[RootLayer].L ) ); EMS[RootLayer].lwc_source -= d_theta; + waterresidual_real -= d_theta * Constants::density_water * EMS[RootLayer].L; } else { + const double d_theta = std::min( std::max(0., ( EMS[RootLayer].theta[WATER] - + wp_fraction * EMS[RootLayer].soilFieldCapacity() ) ), + waterresidual_real / ( Constants::density_water * EMS[RootLayer].L ) ); EMS[RootLayer].theta[WATER] -= d_theta; assert(EMS[RootLayer].theta[WATER] >= -Constants::eps); EMS[RootLayer].theta[AIR] += d_theta; assert(EMS[RootLayer].theta[AIR] >= -Constants::eps); + waterresidual_real -= d_theta * Constants::density_water * EMS[RootLayer].L; } - waterresidual_real -= d_theta * Constants::density_water * EMS[RootLayer].L; // Check if water content is below wilting point in last layer if ( waterresidual_real > 0.5 ) { @@ -340,8 +356,8 @@ void Canopy::SoilWaterUptake(const size_t& SoilNode, const double& transpiration */ double Canopy::get_f4(const double& tempC) { - 
const double F4_A = 1.75; - const double F4_B = 0.5; + static const double F4_A = 1.75; + static const double F4_B = 0.5; // OBS tempC in C const double f4 = 1.0 / ( 1.0 - exp( -F4_A * pow( std::max( 0.00001, tempC ), F4_B ) ) ); @@ -358,7 +374,7 @@ double Canopy::get_f4(const double& tempC) * @param *EMS * @return double */ -double Canopy::get_f2f4(const size_t& SoilNode, ElementData* EMS) +double Canopy::get_f2f4(const size_t& SoilNode, ElementData* EMS, const double wp_fraction, const double rootdepth) { double f2_wpwp; double f2_wcap; double thet_act; @@ -370,13 +386,13 @@ double Canopy::get_f2f4(const size_t& SoilNode, ElementData* EMS) double zupper = 0.; for (size_t e = SoilNode; e --> 0; ) { //e gets decremented right away -> start at SoilNode // 1) root fraction in layer - const double rootfr = RootFraction(zupper, zupper + EMS[e].L); + const double rootfr = RootFraction(zupper, zupper + EMS[e].L, rootdepth); if (rootfr > 0.0 ){ RootLayer = e; // 2) Field Capacity in layer f2_wcap = EMS[e].soilFieldCapacity(); // 3) Wilting point in layer - f2_wpwp = f2_wcap * Canopy::wp_fraction; + f2_wpwp = f2_wcap * wp_fraction; // 4) Soil water content in layer (from a root's point of view) thet_act = std::max(f2_wpwp, EMS[e].theta[WATER]); // 4) Inversed soilwater stress weighted by root fractin in layer @@ -388,12 +404,12 @@ double Canopy::get_f2f4(const size_t& SoilNode, ElementData* EMS) } zupper += EMS[e].L; }// End of loop and now do the bottom layer - + if ( RootLayer > 0 ){ RootLayer -= 1; } f2_wcap = EMS[RootLayer].soilFieldCapacity(); - f2_wpwp = f2_wcap * Canopy::wp_fraction; + f2_wpwp = f2_wcap * wp_fraction; thet_act = std::max(f2_wpwp, EMS[RootLayer].theta[WATER]); f2 += rootresidual * (thet_act - f2_wpwp) / (f2_wcap - f2_wpwp); f4 += get_f4(IOUtils::K_TO_C(EMS[RootLayer].Te)) * rootresidual; @@ -414,14 +430,14 @@ double Canopy::get_f2f4(const size_t& SoilNode, ElementData* EMS) * @param vpd * @return double */ -double Canopy::get_f3(const double& vpd) 
+double Canopy::get_f3(const double& vpd, const double f3_gd) { /* * double F3_GD=0.0003; => now defined in Constants.h * gd [Pa-1] value is for trees (needle or bradleafs), other veg. as crops, * grass tundra etc should have gd=0; */ - const double f3 = 1.0 / exp( -Canopy::f3_gd * vpd ); + const double f3 = 1.0 / exp( -f3_gd * vpd ); return (f3); } @@ -430,12 +446,12 @@ double Canopy::IntCapacity(const CurrentMeteo& Mdata, const SnowStation& Xdata, const double rho_new_snow = SnLaws::compNewSnowDensity(hn_density, hn_density_parameterization, hn_density_fixedValue, Mdata, Xdata, Xdata.Cdata.temp, variant); - if (!force_rain && rho_new_snow!=Constants::undefined && Mdata.psum_ph<1.) { //right conditions for snow - const double density_of_mixed = rho_new_snow*(1.-Mdata.psum_ph) + 1000.*Mdata.psum_ph; - return (Canopy::int_cap_snow * Xdata.Cdata.lai * ( 0.27+46.0 / density_of_mixed )); - } else { - return (Canopy::int_cap_rain * Xdata.Cdata.lai); - } + if (!force_rain && rho_new_snow!=Constants::undefined && Mdata.psum_ph<1.) 
{ //right conditions for snow + const double density_of_mixed = rho_new_snow*(1.-Mdata.psum_ph) + 1000.*Mdata.psum_ph; + return ( Xdata.Cdata.int_cap_snow * Xdata.Cdata.lai * ( 0.27+46.0 / density_of_mixed )); + } else { + return ( Xdata.Cdata.int_cap_rain * Xdata.Cdata.lai); + } } /** @@ -462,10 +478,10 @@ double Canopy::IntUnload(const double& capacity, const double& storage) * @param *interception * @param direct */ -double Canopy::IntRate(const double& capacity, const double& storage, const double& prec, const double& direct) +double Canopy::IntRate(const double& capacity, const double& storage, const double& prec, const double& direct, const double interception_timecoef) { const double interception = std::min( ( 1.0 - direct ) * prec, - Canopy::interception_timecoef * ( capacity - storage)* + interception_timecoef * ( capacity - storage)* ( 1.0 - exp( -(1.0 - direct) * prec / capacity ) ) ); if ( interception < 0.0) return 0.; @@ -474,13 +490,13 @@ double Canopy::IntRate(const double& capacity, const double& storage, const doub } -double Canopy::CanopyAlbedo(const double& tair, const double& wetfrac) +double Canopy::CanopyAlbedo(const double& tair, const double& wetfrac, const SnowStation& Xdata) { // Albedo of partly "wet" canopy = weighted average of dry and wet parts - if (tair > Constants::melting_tk ) { - return (wetfrac * Canopy::can_alb_wet + (1. - wetfrac) * Canopy::can_alb_dry); + if (tair > Constants::meltfreeze_tk ) { + return (wetfrac * Xdata.Cdata.can_alb_wet + (1. - wetfrac) * Xdata.Cdata.can_alb_dry); } else { - return (wetfrac * Canopy::can_alb_snow + (1. - wetfrac) * Canopy::can_alb_dry); + return (wetfrac * Xdata.Cdata.can_alb_snow + (1. 
- wetfrac) * Xdata.Cdata.can_alb_dry); } } @@ -519,10 +535,10 @@ double Canopy::TotalAlbedo(double CanAlb, double sigf, double SurfAlb, double Di * @param elev in radiants * @return double */ -double Canopy::CanopyShadeSoilCover(const double& height, const double& cover, const double& elev) +double Canopy::CanopyShadeSoilCover(const double& height, const double& cover, const double& elev, const double& can_diameter) { if ( elev > 0.0 ) { - return std::min(1.0, cover * (1.0 + 4.0 * height / (Constants::pi * Canopy::can_diameter * tan(elev)))); + return std::min(1.0, cover * (1.0 + 4.0 * height / (Constants::pi * can_diameter * tan(elev)))); } else { return 1.0; } @@ -550,10 +566,10 @@ double Canopy::CanopyWetFraction(const double& capacity, const double& storage) * @param elev * @return sigf */ -double Canopy::CanopyTransmissivity(const double& lai, const double& elev) +double Canopy::CanopyTransmissivity(const double& lai, const double& elev, const double krnt_lai) { const double pai = 0.; // pai [additional plant area index] could be a function of interception storage - return (1. - exp(-Canopy::krnt_lai * (lai + pai) / std::max(sin(elev), 0.0001))); // Beer-Lambert type of law + return (1. 
- exp(-krnt_lai * (lai + pai) / std::max(sin(elev), 0.0001))); // Beer-Lambert type of law } /** @@ -584,7 +600,7 @@ double Canopy::CanopyTransmissivity(const double& lai, const double& elev) void Canopy::LineariseNetRadiation(const CurrentMeteo& Mdata, const CanopyData& Cdata, const SnowStation& Xdata, double& iswrac, double& rsnet, double& ilwrac, double& r0,double& r1, const double& canopyalb, double& CanopyClosureDirect, double& RadFracDirect, - const double& sigfdirect, double& r1p) + const double& sigfdirect, double& r1p) const { // Variables used a lot const bool snow = (Xdata.getNumberOfElements()>Xdata.SoilNode); @@ -610,11 +626,11 @@ void Canopy::LineariseNetRadiation(const CurrentMeteo& Mdata, const CanopyData& } const double sigf = Cdata.sigf; // Canopy Closure for diffuse shortwave and longwave - const double CanopyClosure = 1. - Xdata.Cdata.direct_throughfall; //HACK: we already pass Cdata + const double CanopyClosure = 1. - Cdata.direct_throughfall; //HACK: we already pass Cdata // Canopy Closure for direct shortwave - if ( Canopy::canopytransmission ){ - CanopyClosureDirect = CanopyShadeSoilCover(Cdata.height, CanopyClosure, elev); + if (canopytransmission ){ + CanopyClosureDirect = CanopyShadeSoilCover(Cdata.height, CanopyClosure, elev, Cdata.can_diameter); } else{ CanopyClosureDirect = CanopyClosure; } @@ -684,7 +700,7 @@ void Canopy::LineariseNetRadiation(const CurrentMeteo& Mdata, const CanopyData& void Canopy::LineariseNetRadiation2L(const CurrentMeteo& Mdata, const CanopyData& Cdata, const SnowStation& Xdata, double& iswrac, double& rsnet, double& ilwrac, double& r0,double& r1, double& r2, double& rt0, double& rt1, double& rt2, const double& canopyalb, double& CanopyClosureDirect, double& RadFracDirect, - const double& sigfdirect,const double& sigftrunkdirect, double& r1p, double& r2p) + const double& sigfdirect,const double& sigftrunkdirect, double& r1p, double& r2p) const { // Variables used a lot const bool snow = 
(Xdata.getNumberOfElements()>Xdata.SoilNode); @@ -716,8 +732,8 @@ void Canopy::LineariseNetRadiation2L(const CurrentMeteo& Mdata, const CanopyData const double CanopyClosure = 1. - Cdata.direct_throughfall; //HACK: we already pass Cdata double CanClosDirLeaves, CanClosDirTrunks; // Canopy Closure for direct shortwave - if ( Canopy::canopytransmission ){ - CanopyClosureDirect = CanopyShadeSoilCover(Cdata.height, CanopyClosure, elev); + if (canopytransmission ){ + CanopyClosureDirect = CanopyShadeSoilCover(Cdata.height, CanopyClosure, elev, Cdata.can_diameter); CanClosDirLeaves= CanopyClosureDirect ; CanClosDirTrunks=0.; @@ -742,7 +758,7 @@ void Canopy::LineariseNetRadiation2L(const CurrentMeteo& Mdata, const CanopyData // net absorbed by canopy // first only diffuse fraction rsnet = RadFracDiffuse * iswrac * (1. - canopyalb) * sigf * - (1. + ag * (1. - sigf) / (1. - sigf * ag * canopyalb)* attfactor_SW + (1. - sigf) * sigftrunk * trunkalb); + (1. + ag * (1. - sigf) / (1. - sigf * ag * canopyalb)* attfactor_SW + (1. - sigf) * sigftrunk * Cdata.trunkalb); // 1.2. LW // Longwave radiation above canopy: @@ -758,7 +774,7 @@ void Canopy::LineariseNetRadiation2L(const CurrentMeteo& Mdata, const CanopyData const double r0p = rsnet + sigf * Cdata.ec*((1. + psi / star * attfactor_LW) * ilwrac + eg * Constants::stefan_boltzmann * Optim::pow4(Tsfc) * attfactor_LW / star); r1p = -sigf *Cdata.ec * Constants::stefan_boltzmann * (2. - Cdata.ec * sigf * (1.-eg)/star * attfactor_LW); - r2p = sigf * Cdata.ec * Constants::stefan_boltzmann * et *sigftrunk * (1. + (1. -eg)); + r2p = sigf * Cdata.ec * Constants::stefan_boltzmann * Cdata.et *sigftrunk * (1. + (1. 
-eg)); // Linearise RNC around TC(t) and Ttrunk(t) by using TC(t)^4=TC(t-1)^4+4*TC(t-1)^3*(TC(t)-TC(t-1)), // which gives us r0, r1, and r2 correpsonding to RNC(t)=r0+r1*TC(t)+ r2* Ttrunk @@ -777,7 +793,7 @@ void Canopy::LineariseNetRadiation2L(const CurrentMeteo& Mdata, const CanopyData // Now, add the direct component with different CanopyClosure const double rsnetdir = CanClosDirLeaves * RadFracDirect * iswrac * - (1. - canopyalb) * sigfdirect * (1. + ag * (1. - sigfdirect) *attfactor_SWdir / (1. - sigfdirect * ag * canopyalb) + (1. - sigfdirect) * sigftrunkdirect * trunkalb) + (1. - canopyalb) * sigfdirect * (1. + ag * (1. - sigfdirect) *attfactor_SWdir / (1. - sigfdirect * ag * canopyalb) + (1. - sigfdirect) * sigftrunkdirect * Cdata.trunkalb) + CanClosDirTrunks * RadFracDirect * iswrac * (1. - canopyalb) * sigfdirect * ag *attfactor_SWdir/(1. - sigfdirect * ag * canopyalb); rsnet += rsnetdir; @@ -789,12 +805,12 @@ void Canopy::LineariseNetRadiation2L(const CurrentMeteo& Mdata, const CanopyData // 2.1. SW diffuse - double rsnettrunk = RadFracDiffuse * iswrac * (1. -sigf) * (1-attfactor_SW) * (1-trunkalb); + double rsnettrunk = RadFracDiffuse * iswrac * (1. -sigf) * (1-attfactor_SW) * (1-Cdata.trunkalb); // 2.2. LW - const double rt0p = rsnettrunk + et*(1-attfactor_LW)* (eg * Constants::stefan_boltzmann * Optim::pow4(Tsfc) + ilwrac * (1. - sigf)); - const double rt1p = -2. * Constants::stefan_boltzmann *et* (1-attfactor_LW) ; - const double rt2p = et*(1-attfactor_LW) * Cdata.ec * sigf * Constants::stefan_boltzmann ; + const double rt0p = rsnettrunk + Cdata.et*(1-attfactor_LW)* (eg * Constants::stefan_boltzmann * Optim::pow4(Tsfc) + ilwrac * (1. - sigf)); + const double rt1p = -2. * Constants::stefan_boltzmann *Cdata.et* (1-attfactor_LW) ; + const double rt2p = Cdata.et*(1-attfactor_LW) * Cdata.ec * sigf * Constants::stefan_boltzmann ; rt0 = rt0p - 3. * rt1p * Optim::pow4(Tt_old) - 3. 
* rt2p * Optim::pow4(TC_old) ; rt1 = 4.* rt1p * Optim::pow3(Tt_old); @@ -806,8 +822,8 @@ void Canopy::LineariseNetRadiation2L(const CurrentMeteo& Mdata, const CanopyData rt2 *= CanopyClosure; // 2.3. SW direct & NetRad to Trunks - const double rsnettrunkdir = CanClosDirLeaves * RadFracDirect * iswrac * (1. -sigf) * (1. - attfactor_SWdir)*(1.-trunkalb) - + CanClosDirTrunks * RadFracDirect * iswrac * (1. - attfactor_SWdir) *(1.-trunkalb); + const double rsnettrunkdir = CanClosDirLeaves * RadFracDirect * iswrac * (1. -sigfdirect) * (1. - attfactor_SWdir)*(1.-Cdata.trunkalb) + + CanClosDirTrunks * RadFracDirect * iswrac * (1. - attfactor_SWdir) *(1.-Cdata.trunkalb); rsnettrunk += rsnettrunkdir; rt0 += rsnettrunkdir; } @@ -883,7 +899,7 @@ void Canopy::LineariseLatentHeatFlux(const double& ce_canopy, const double& tc_o * @param HMLeaves * @param HMTrunks */ -void Canopy::CalculateHeatMass(const double& height, const double& BasalArea, double& lai, double& HMLeaves, double& HMTrunks) +void Canopy::CalculateHeatMass(const double& height, const double& BasalArea, double& lai, double& HMLeaves, double& HMTrunks, const double biomass_density, const double biomass_heat_capacity) { HMLeaves= 0.001 * lai * biomass_density * biomass_heat_capacity; HMTrunks= 0.5 * BasalArea * height * biomass_density * biomass_heat_capacity; @@ -896,7 +912,7 @@ void Canopy::CalculateHeatMass(const double& height, const double& BasalArea, do * @param HM0 * @param HM1 */ -void Canopy::LineariseConductiveHeatFlux(const double& tc_old, const double& HM, double& HM0, double& HM1, const double& DT, const double& scalingfactor) +void Canopy::LineariseConductiveHeatFlux(const double& tc_old, const double& HM, double& HM0, double& HM1, const double& DT, const double& scalingfactor) const { if (CanopyHeatMass) { HM0 = -1.0 * scalingfactor * HM /H_TO_S(DT) *tc_old; @@ -1018,17 +1034,17 @@ void Canopy::CanopyEnergyBalance2L(double& h0, double& h1, double& le0, * @param HM0 * @param HM1 */ -void 
Canopy::CanopyEvaporationComponents(double& ce_canopy, - double& ce_transpiration, double& LECANOPY, - double& ta,double& I, double DT, +void Canopy::CanopyEvaporationComponents(const double& ce_canopy, + const double& ce_transpiration, double& LECANOPY, + const double& ta, const double& I, const double DT, double& CanopyEvaporation, double& INTEVAP, double& TRANSPIRATION, double& RNCANOPY, double& HCANOPY,double& TCANOPY, - double& r0, double& r1, double& h0, double& h1, + const double& r0, const double& r1, const double& h0, const double& h1, double& LECANOPYCORR, - double& wetfraction, double& HM0, double& HM1) + const double& wetfraction, const double& HM0, const double& HM1) { - if ( ta > Constants::freezing_tk ) { + if ( ta > Constants::meltfreeze_tk ) { CanopyEvaporation = DT * 3600.0 * LECANOPY / Constants::lh_vaporization; // [mm] } else { CanopyEvaporation = DT * 3600.0 * LECANOPY / Constants::lh_sublimation; // [mm] @@ -1044,7 +1060,7 @@ void Canopy::CanopyEvaporationComponents(double& ce_canopy, if ( INTEVAP > I ) { INTEVAP = I; CanopyEvaporation = INTEVAP + TRANSPIRATION; - if ( ta > Constants::freezing_tk ) { + if ( ta > Constants::meltfreeze_tk ) { LECANOPYCORR = CanopyEvaporation * Constants::lh_vaporization / (DT * 3600.0); } else { LECANOPYCORR = CanopyEvaporation * Constants::lh_sublimation / (DT * 3600.0); @@ -1089,19 +1105,19 @@ void Canopy::CanopyEvaporationComponents(double& ce_canopy, * @param HM1 */ -void Canopy::CanopyEvaporationComponents2L(double& ce_canopy, - double& ce_transpiration, double& LECANOPY, - double& ta, double& I, double DT, +void Canopy::CanopyEvaporationComponents2L(const double& ce_canopy, + const double& ce_transpiration, double& LECANOPY, + const double& ta, const double& I, const double DT, double& CanopyEvaporation, double& INTEVAP, double& TRANSPIRATION, double& RNCANOPY, double& HCANOPY,double& TCANOPY, double& Ttrunk, - double& TT0, double& TT1, - double& r0, double& r1, double& r2, double& h0, double& h1, + const 
double& TT0, const double& TT1, + const double& r0, const double& r1, const double& r2, const double& h0, const double& h1, double& LECANOPYCORR, - double& wetfraction, - double& HM0, double& HM1) + const double& wetfraction, + const double& HM0, const double& HM1) { - if ( ta > Constants::freezing_tk ) { + if ( ta > Constants::meltfreeze_tk ) { CanopyEvaporation = DT * 3600.0 * LECANOPY / Constants::lh_vaporization; // [mm] } else { CanopyEvaporation = DT * 3600.0 * LECANOPY / Constants::lh_sublimation; // [mm] @@ -1117,7 +1133,7 @@ void Canopy::CanopyEvaporationComponents2L(double& ce_canopy, if ( INTEVAP > I ) { INTEVAP = I; CanopyEvaporation = INTEVAP + TRANSPIRATION; - if ( ta > Constants::freezing_tk ) { + if ( ta > Constants::meltfreeze_tk ) { LECANOPYCORR = CanopyEvaporation * Constants::lh_vaporization / (DT * 3600.0); } else { LECANOPYCORR = CanopyEvaporation * Constants::lh_sublimation / (DT * 3600.0); @@ -1148,10 +1164,10 @@ double Canopy::get_psim(const double& xi) return log((1. + x) * (1. + x) * (1. + x * x) / 8.) - 2 * atan(x) + mio::Cst::PI / 2.; } else { // stable case from Holstlag and Bruin, following Beljaars & Holtslag 1991 - const double a = 1.; - const double b = 2./3.; - const double c = 5.; - const double d = 0.35; + static const double a = 1.; + static const double b = 2./3.; + static const double c = 5.; + static const double d = 0.35; return -(a * xi + b * (xi - c/d) * exp(-d * xi) + b * c/d); } } @@ -1169,10 +1185,10 @@ double Canopy::get_psih(const double& xi) return (2. * log((1. + x*x) / 2.) ); } else { // Stable case, func=1, equation from Holtslag and De Bruin following Beljaars & Holstlag, 1991 - const double a = 1.; - const double b = 2. / 3.; - const double c = 5.; - const double d = 0.35; + static const double a = 1.; + static const double b = 2. / 3.; + static const double c = 5.; + static const double d = 0.35; return -(pow((1. + 2. / 3. * a * xi), 3. / 2.) 
+ b * (xi - c/d) * exp(-d * xi) + b * c/d - 1.); } } @@ -1201,7 +1217,7 @@ double Canopy::RichardsonToAeta(double za, double TempAir, double DiffTemp, // STEP 2: Compute error in terms of Ri using etaOld and Ri2eta(etaOld) double Error = Eta / Ri2eta - Ri; // STEP 3: solve iteratively - const double acc = 0.0001; + static const double acc = 0.0001; int itt=1; while ( fabs(Error) > acc && itt <= maxitt ) { // 3.1 new Eta @@ -1260,27 +1276,26 @@ double Canopy::RichardsonToAeta(double za, double TempAir, double DiffTemp, void Canopy::CanopyTurbulentExchange(const CurrentMeteo& Mdata, const double& refheight, const double& zomg, const double& wetfraction, SnowStation& Xdata, double& ch_canopy, double& ce_canopy, double& ce_transpiration, - double& ce_interception, double& ce_condensation) + double& ce_interception, double& ce_condensation) const { const double karman = 0.4; - CanopyData *Cdata = &Xdata.Cdata; const size_t nE = Xdata.getNumberOfElements(); // check wind speed to be at least 0.1 m/s const double vw_local = (Mdata.vw>0.3)? Mdata.vw : 0.3; // canopy height above snow surface - const double zcan = Cdata->height - (Xdata.cH - Xdata.Ground); + const double zcan = Xdata.Cdata.height - (Xdata.cH - Xdata.Ground); /* * 1. 
displacement and roughness (mom) according to Shaw and Perreira (1981) * zdisplcan = 0.803 + 0.108 * CanDensMax - (0.462 - 0.086 * CanDensMax) *-> - *-> exp(-(0.163 + 0.283 * CanDensMax) * Cdata->lai); + *-> exp(-(0.163 + 0.283 * CanDensMax) * Xdata.Cdata.lai); * zdisplcan = std::max(0., std::min(refheight - 0.5, zdisplcan * zcan)); * 1.3 roughness length - * const double EQ1 = (0.175 - 0.098 * CanDensMax) + (-0.098 + 0.045 * CanDensMax) * log10(Cdata->lai); - * const double EQ2 = (0.150 - 0.025 * CanDensMax) + (0.122 - 0.0135 * CanDensMax) * log10(Cdata->lai); + * const double EQ1 = (0.175 - 0.098 * CanDensMax) + (-0.098 + 0.045 * CanDensMax) * log10(Xdata.Cdata.lai); + * const double EQ2 = (0.150 - 0.025 * CanDensMax) + (0.122 - 0.0135 * CanDensMax) * log10(Xdata.Cdata.lai); * zomc = std::min(RoughLmax, std::max(zcan * std::min(EQ1, EQ2), RoughLmin)) * CAN_Z0M_COEF; * 1. displacement and roughness as simple scaling of canopy height. @@ -1293,28 +1308,28 @@ void Canopy::CanopyTurbulentExchange(const CurrentMeteo& Mdata, const double& re // Shaw Perreira parameters // double CanDensMax = 0.7; - const double RoughLmin = 0.01; - const double RoughLmax = 100.; - const double zdisplcan = std::max(0., std::min(refheight - 0.5, Canopy::displ_to_canopyheight_ratio * zcan)); - const double zomc = std::max(std::max(RoughLmin, zomg), std::min(RoughLmax, Canopy::roughmom_to_canopyheight_ratio * zcan)); + static const double RoughLmin = 0.01; + static const double RoughLmax = 100.; + const double zdisplcan = std::max(0., std::min(refheight - 0.5, Xdata.Cdata.displ_to_canopyheight_ratio * zcan)); + const double zomc = std::max(std::max(RoughLmin, zomg), std::min(RoughLmax, Xdata.Cdata.roughmom_to_canopyheight_ratio * zcan)); //2. 
aerodynamic resistances simple approach (Blyth, 1999) //2.1 roughness length for scalars (heat and vapour) - const double zohc = Canopy::roughheat_to_roughmom_ratio * zomc; - const double zohg = Canopy::roughheat_to_roughmom_ratio * zomg; + const double zohc = Xdata.Cdata.roughheat_to_roughmom_ratio * zomc; + const double zohg = Xdata.Cdata.roughheat_to_roughmom_ratio * zomg; // update Cdata variables - Cdata->z0m = zomc; - Cdata->z0h = zohc; - Cdata->zdispl = zdisplcan; + Xdata.Cdata.z0m = zomc; + Xdata.Cdata.z0h = zohc; + Xdata.Cdata.zdispl = zdisplcan; // 2.2 Stability correction (adopted from Beljaars and Holtslag, 1991) double psim = 0.0; double psih = 0.0; - if ( Canopy::canopy_stabilitycorrection ) { + if ( Xdata.Cdata.canopy_stabilitycorrection ) { // 2.2.1 Get Aeta = Monin-Obukhov stabilityparameter from Richardson number const double aeta = RichardsonToAeta(refheight - zdisplcan, Mdata.ta, - Mdata.ta - Cdata->temp, vw_local, zomc, zohc, 5); + Mdata.ta - Xdata.Cdata.temp, vw_local, zomc, zohc, 5); psih = -get_psih(aeta) + get_psih(aeta * zohc / (refheight - zdisplcan)); psim = -get_psim(aeta) + get_psim(aeta * zomc / (refheight - zdisplcan)); } @@ -1323,43 +1338,43 @@ void Canopy::CanopyTurbulentExchange(const CurrentMeteo& Mdata, const double& re const double ustar = vw_local * karman / (log((refheight - zdisplcan) / zomc) + psim); // 2.4 TRANSFER COEFFICIENT FOR SCALARS ABOVE CANOPY - double ch = Canopy::can_ch0 / (Constants::density_air * Constants::specific_heat_air) + double ch = Xdata.Cdata.can_ch0 / (Constants::density_air * Constants::specific_heat_air) + ustar * karman / (log((refheight - zdisplcan) / zohc) + psih); const double ch_e = ustar * karman / (log((refheight - zdisplcan) / zohc) + psih); // 2.5 AERODYNAMIC RESISTANCE ABOVE CANOPY - Cdata->ra = 1. / ch; + Xdata.Cdata.ra = 1. / ch; const double ra_e = 1. 
/ ch_e; // 2.6 CANOPY TO CANOPY LEVEL RESISTANCE if ( log(zomc / zohc) > 0.0 ) { - Cdata->rc = (log(zomc/zohc))/(karman * ustar ); + Xdata.Cdata.rc = (log(zomc/zohc))/(karman * ustar ); } else { - Cdata->rc = 0.0; + Xdata.Cdata.rc = 0.0; } // 2.7 SURFACE TO CANOPY LEVEL RESISTANCE if (log(zomc / zohg) > 0.) { - Cdata->rs = (log(zomc / zohg)) / (karman * ustar ) * (1. + Canopy::can_rs_mult * (1. - exp(-Cdata->lai))); + Xdata.Cdata.rs = (log(zomc / zohg)) / (karman * ustar ) * (1. + Xdata.Cdata.can_rs_mult * (1. - exp(-Xdata.Cdata.lai))); } else { - Cdata->rs = 0.; + Xdata.Cdata.rs = 0.; } // 2.8 a stability correction is needed for the surface to canopy level resistance - if ( Canopy::canopy_stabilitycorrection && (Cdata->rs > 0.) ) { + if ( Xdata.Cdata.canopy_stabilitycorrection && (Xdata.Cdata.rs > 0.) ) { double aeta_g = 0.; int i = 0; double rs_change = 1.; while( (i < 100) && (fabs(rs_change) > 0.0001) ) { i++; // 1. estimate ustar and ua(zdisplcan) above surface from ras and zomg, zohg, and zref = zdisplcan - const double ustar_below1 = (1. / Cdata->rs) / karman * (log(zdisplcan / zohg) + const double ustar_below1 = (1. / Xdata.Cdata.rs) / karman * (log(zdisplcan / zohg) - get_psih(aeta_g) + get_psih(aeta_g * zohg / (zdisplcan))); const double vw_zdisplcan = ustar_below1 / karman * (log(zdisplcan / zomg) - get_psim(aeta_g) + get_psim(aeta_g * zomg / (zdisplcan))); // 2. estimate aeta above surface const double Tsup = (nE>0)? Xdata.Ndata[nE].T : Mdata.ta; - aeta_g = RichardsonToAeta(zdisplcan, Cdata->temp, Cdata->temp - + aeta_g = RichardsonToAeta(zdisplcan, Xdata.Cdata.temp, Xdata.Cdata.temp - Tsup, vw_zdisplcan, zomg, zohg, 5); // 3. new guess of ustar based on uadisplcan and new aeta_g const double ustar_below2 = vw_zdisplcan * karman / (log((zdisplcan)/zomg) - @@ -1370,8 +1385,8 @@ void Canopy::CanopyTurbulentExchange(const CurrentMeteo& Mdata, const double& re get_psih(aeta_g) + get_psih(aeta_g * zohg / (zdisplcan))); // 5. 
new guess for AERODYNAMIC RESISTANCE below CANOPY - rs_change = 1. / ch - Cdata->rs; - Cdata->rs = 1. / ch; + rs_change = 1. / ch - Xdata.Cdata.rs; + Xdata.Cdata.rs = 1. / ch; } } @@ -1381,30 +1396,30 @@ void Canopy::CanopyTurbulentExchange(const CurrentMeteo& Mdata, const double& re * and skip soil moisture function */ if ( useSoilLayers ) { - Cdata->rstransp = Canopy::rsmin * get_f1(Cdata->iswrac)*get_f2f4(Xdata.SoilNode, &Xdata.Edata[0]) * - get_f3((1. - Mdata.rh) * Atmosphere::vaporSaturationPressure(Mdata.ta)) / Cdata->lai; + Xdata.Cdata.rstransp = Xdata.Cdata.rsmin * get_f1(Xdata.Cdata.iswrac)*get_f2f4(Xdata.SoilNode, &Xdata.Edata[0], Xdata.Cdata.wp_fraction, Xdata.Cdata.rootdepth) * + get_f3((1. - Mdata.rh) * Atmosphere::vaporSaturationPressure(Mdata.ta), Xdata.Cdata.f3_gd) / Xdata.Cdata.lai; } else { const double Temp = (nE>0)? 0. : IOUtils::K_TO_C(Mdata.ta); - Cdata->rstransp = Canopy::rsmin * get_f1(Cdata->iswrac) * get_f4(Temp) * get_f3((1. - Mdata.rh) * - Atmosphere::vaporSaturationPressure(Mdata.ta)) / Cdata->lai; + Xdata.Cdata.rstransp = Xdata.Cdata.rsmin * get_f1(Xdata.Cdata.iswrac) * get_f4(Temp) * get_f3((1. 
- Mdata.rh) * + Atmosphere::vaporSaturationPressure(Mdata.ta), Xdata.Cdata.f3_gd) / Xdata.Cdata.lai; } // Exchange coefficients sensible heat - ch_canopy = Constants::density_air * Constants::specific_heat_air / (Cdata->ra + Cdata->rc); + ch_canopy = Constants::density_air * Constants::specific_heat_air / (Xdata.Cdata.ra + Xdata.Cdata.rc); // latent heat interception if ( Mdata.ta < 273.15 ) { ce_condensation = 0.622 * Constants::lh_sublimation / (Constants::gas_constant_air * Mdata.ta - * Canopy::raincrease_snow * (ra_e + Cdata->rc));// * std::max(0.1,wetfraction); + * Xdata.Cdata.raincrease_snow * (ra_e + Xdata.Cdata.rc));// * std::max(0.1,wetfraction); ce_interception = 0.622 * Constants::lh_sublimation / (Constants::gas_constant_air * Mdata.ta - * Canopy::raincrease_snow * (ra_e + Cdata->rc));// * wetfraction; + * Xdata.Cdata.raincrease_snow * (ra_e + Xdata.Cdata.rc));// * wetfraction; ce_transpiration = 0.0; } else { ce_condensation = 0.622 * Constants::lh_vaporization / (Constants::gas_constant_air * Mdata.ta - * (ra_e + Cdata->rc));// * std::max(0.1,wetfraction); + * (ra_e + Xdata.Cdata.rc));// * std::max(0.1,wetfraction); ce_interception = 0.622 * Constants::lh_vaporization / (Constants::gas_constant_air * Mdata.ta - * (ra_e + Cdata->rc));// * wetfraction; + * (ra_e + Xdata.Cdata.rc));// * wetfraction; ce_transpiration = 0.622 * Constants::lh_vaporization / (Constants::gas_constant_air * Mdata.ta - * (ra_e + Cdata->rstransp + Cdata->rc));// * (1.0-wetfraction); + * (ra_e + Xdata.Cdata.rstransp + Xdata.Cdata.rc));// * (1.0-wetfraction); } ce_canopy = ce_interception * std::max(0.001, wetfraction) + ce_transpiration * (1.0 - wetfraction); @@ -1428,7 +1443,7 @@ void Canopy::CanopyTurbulentExchange(const CurrentMeteo& Mdata, const double& re * @param sigfdirect * @param sigftrunkdirect */ -void Canopy::CanopyRadiationOutput(SnowStation& Xdata, const CurrentMeteo& Mdata, double ac, double &iswrac, double &rswrac, double &iswrbc, double &rswrbc, double &ilwrac, 
double &rlwrac, double &ilwrbc, double &rlwrbc, double CanopyClosureDirect, double RadFracDirect, double sigfdirect, double sigftrunkdirect) +void Canopy::CanopyRadiationOutput(SnowStation& Xdata, const CurrentMeteo& Mdata, double ac, double &iswrac, double &rswrac, double &iswrbc, double &rswrbc, double &ilwrac, double &rlwrac, double &ilwrbc, double &rlwrbc, double CanopyClosureDirect, double RadFracDirect, double sigfdirect, double sigftrunkdirect) const { const bool snow = (Xdata.getNumberOfElements() > Xdata.SoilNode); const double Tsfc4 = (snow)? Optim::pow4(Xdata.Ndata[Xdata.getNumberOfElements()].T) : Optim::pow4(Mdata.ta); @@ -1436,17 +1451,19 @@ void Canopy::CanopyRadiationOutput(SnowStation& Xdata, const CurrentMeteo& Mdata const double ag1 = (snow)? Xdata.Albedo : Xdata.SoilAlb; // modifs for forestfloor_alb - const size_t nE = Xdata.getNumberOfElements(); - const double age = (snow && forestfloor_alb) ? std::max(0., Mdata.date.getJulian() - Xdata.Edata[nE-1].depositionDate.getJulian()) : 0.; // days - const double ag = (ag1 -.3)* exp(-age/7.) + 0.3; + const size_t nE = Xdata.getNumberOfElements(); + const double age = (snow && forestfloor_alb) ? std::max(0., Mdata.date.getJulian() - Xdata.Edata[nE-1].depositionDate.getJulian()) : 0.; // days + const double ag = (ag1 -.3)* exp(-age/7.) 
+ 0.3; Xdata.Cdata.forestfloor_alb += ag ; - const double TC4 = Optim::pow4(Xdata.Cdata.temp); - const double Tt4 = Optim::pow4(Xdata.Cdata.Ttrunk); - const double sigf = Xdata.Cdata.sigf; - const double sigftrunk = Xdata.Cdata.sigftrunk; - const double ec = Xdata.Cdata.ec; - const double eg = 1.0; + const double TC4 = Optim::pow4(Xdata.Cdata.temp); + const double Tt4 = Optim::pow4(Xdata.Cdata.Ttrunk); + const double sigf = Xdata.Cdata.sigf; + const double sigftrunk = Xdata.Cdata.sigftrunk; + const double ec = Xdata.Cdata.ec; + const double eg = 1.0; + const double et = Xdata.Cdata.et; + const double trunkalb = Xdata.Cdata.trunkalb; // modifs for 2layercanopy: attfactor stands for "attenuation factor". @@ -1489,47 +1506,47 @@ void Canopy::CanopyRadiationOutput(SnowStation& Xdata, const CurrentMeteo& Mdata // Scaling of results with CanopyClosureDiffuse and CanopyClosureDirect const double CanopyClosureDiffuse = 1. - Xdata.Cdata.direct_throughfall; - - if (Twolayercanopy) { - double CanClosDirLeaves = (canopytransmission)? CanopyShadeSoilCover(Xdata.Cdata.height, CanopyClosureDiffuse, Mdata.elev) : CanopyClosureDiffuse; - double CanClosDirTrunks = 0; - /*if (canopytransmission) { // below (optional): if uncommented, allows direct solar insolation of the trunks + + if (Twolayercanopy) { + double CanClosDirLeaves = (canopytransmission)? CanopyShadeSoilCover(Xdata.Cdata.height, CanopyClosureDiffuse, Mdata.elev,Xdata.Cdata.can_diameter) : CanopyClosureDiffuse; + double CanClosDirTrunks = 0; + /*if (canopytransmission) { // below (optional): if uncommented, allows direct solar insolation of the trunks CanClosDirLeaves = CanopyShadeSoilCover(Xdata.Cdata.height*(1. 
- trunk_frac_height), CanopyClosureDiffuse, Mdata.elev); CanClosDirTrunks = CanopyShadeSoilCover(Xdata.Cdata.height, CanopyClosureDiffuse, Mdata.elev)- CanClosDirLeaves; }*/ - - // Shortwave fluxes (diffuse) + + // Shortwave fluxes (diffuse) rswrac = (rswrac_loc * CanopyClosureDiffuse + iswrac * ag * (1.0 - CanopyClosureDiffuse)) * (1.0 - RadFracDirect); iswrbc = (iswrbc_loc * CanopyClosureDiffuse + iswrac * (1.0 - CanopyClosureDiffuse)) * (1.0 - RadFracDirect); rswrbc = (rswrbc_loc * CanopyClosureDiffuse + iswrac * ag * (1.0 - CanopyClosureDiffuse)) * (1.0 - RadFracDirect); - + // Shortwave fluxes (direct) rswrac += (rswrac_loc2 * CanClosDirLeaves + rswrac_loc3 * CanClosDirTrunks + iswrac * ag * (1.0 - CanClosDirTrunks - CanClosDirLeaves)) * RadFracDirect; iswrbc += (iswrbc_loc2 * CanClosDirLeaves + iswrbc_loc3 * CanClosDirTrunks + iswrac * (1.0 - CanClosDirTrunks - CanClosDirLeaves)) * RadFracDirect; rswrbc += (rswrbc_loc2 * CanClosDirLeaves + rswrbc_loc3 * CanClosDirTrunks + iswrac * ag * (1.0 - CanClosDirTrunks - CanClosDirLeaves)) *RadFracDirect; - + // Longwave fluxes (treat as diffuse) rlwrac = rlwrac * CanopyClosureDiffuse + Constants::stefan_boltzmann * eg * Tsfc4 * (1.0-CanopyClosureDiffuse); ilwrbc = ilwrbc * CanopyClosureDiffuse + ilwrac * (1.0 - CanopyClosureDiffuse); rlwrbc = rlwrbc * CanopyClosureDiffuse + Constants::stefan_boltzmann * eg * Tsfc4 * (1.0-CanopyClosureDiffuse); - + // radiations to trunks Xdata.Cdata.SWnet_Trunks = (1.0 - RadFracDirect) * iswrac * (1. -sigf) * (1.-trunkalb)*(1-attfactor_SW) * CanopyClosureDiffuse - + CanClosDirLeaves * RadFracDirect *iswrac * (1. -sigf) * (1.-trunkalb)*(1. - attfactor_SWdir) + + CanClosDirLeaves * RadFracDirect *iswrac * (1. -sigfdirect) * (1.-trunkalb)*(1. - attfactor_SWdir) + CanClosDirTrunks * RadFracDirect *iswrac * (1.-trunkalb)*(1. 
- attfactor_SWdir) ; Xdata.Cdata.LWnet_Trunks = RAT * CanopyClosureDiffuse ; - + } else { - // Shortwave fluxes (diffuse) + // Shortwave fluxes (diffuse) rswrac = (rswrac_loc * CanopyClosureDiffuse + iswrac * ag * (1.0 - CanopyClosureDiffuse)) * (1.0 - RadFracDirect); iswrbc = (iswrbc_loc * CanopyClosureDiffuse + iswrac * (1.0 - CanopyClosureDiffuse)) * (1.0 - RadFracDirect); rswrbc = (rswrbc_loc * CanopyClosureDiffuse + iswrac * ag * (1.0 - CanopyClosureDiffuse)) * (1.0 - RadFracDirect); - + // Shortwave fluxes (direct) rswrac += (rswrac_loc2 * CanopyClosureDirect + iswrac * ag * (1.0 - CanopyClosureDirect)) * RadFracDirect; iswrbc += (iswrbc_loc2 * CanopyClosureDirect + iswrac * (1.0 - CanopyClosureDirect)) * RadFracDirect; rswrbc += (rswrbc_loc2 * CanopyClosureDirect + iswrac * ag * (1.0 - CanopyClosureDirect)) *RadFracDirect; - + // Longwave fluxes (treat as diffuse) rlwrac = rlwrac * CanopyClosureDiffuse + Constants::stefan_boltzmann * eg * Tsfc4 * (1.0-CanopyClosureDiffuse); ilwrbc = ilwrbc * CanopyClosureDiffuse + ilwrac * (1.0 - CanopyClosureDiffuse); @@ -1553,20 +1570,22 @@ void Canopy::CanopyRadiationOutput(SnowStation& Xdata, const CurrentMeteo& Mdata * @param height_of_wind_val * @param adjust_VW_height if set to false, assumes a constant measurement height for wind values (default: true, ie. * take into account the snow height decreasing the sensor height above the surface) + * @return true if the canopy module could be used, false if not (canopy under the snow, etc) */ -void Canopy::runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, double roughness_length, double height_of_wind_val, const bool& adjust_VW_height) +bool Canopy::runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, const double& roughness_length, const double& height_of_wind_val, const bool& adjust_VW_height) { - if (Xdata.Cdata.direct_throughfall==1.) 
- throw InvalidArgumentException("Can not use Canopy with CanopyDirectThroughfall == 1", AT); - + Twolayercanopy = Twolayercanopy_user; //so we can temporarily overwrite the user's choice if needed const double hs = Xdata.cH - Xdata.Ground; const size_t nE = Xdata.getNumberOfElements(); - // First check, whether there is Canopy above the snow, i.e. whether s.th. needs to be done here - if ( (Xdata.Cdata.height - 0.01) < hs ) { + //no canopy or no canopy above the snow + if ( (Xdata.Cdata.lai <= 0.0) || (Xdata.Cdata.height <= 0.0) || ((Xdata.Cdata.height - 0.01) < hs)) { Xdata.Cdata.zdispl = -0.7; - return; + return false; } + if (Xdata.Cdata.direct_throughfall==1.) + throw InvalidArgumentException("Can not use Canopy with CanopyDirectThroughfall == 1", AT); + // Check that some important initial values are within reasonable bounds if ( Xdata.Cdata.temp < 203.15 ) { Xdata.Cdata.temp = 273.15; @@ -1574,16 +1593,10 @@ void Canopy::runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, double roug if ( Xdata.Cdata.storage < 0.0 ) { Xdata.Cdata.storage = 0.0; } - if ( Xdata.Cdata.lai <= 0.0 ) { - Xdata.Cdata.lai = 0.0; - return; //abort function execution, there is no canopy at this point - } - if ( Xdata.Cdata.height <= 0.0 ) { - Xdata.Cdata.height = 0.0; - } + Xdata.Cdata.snowfac += Mdata.psum * (1. 
- Mdata.psum_ph); Xdata.Cdata.rainfac += Mdata.psum * Mdata.psum_ph; - + // 1.1 compute the interception capacity [mm m-2] const double intcapacity = IntCapacity(Mdata, Xdata); @@ -1610,7 +1623,7 @@ void Canopy::runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, double roug } // 1.3 compute the interception [mm timestep-1] and update storage [mm] - const double interception = IntRate(intcapacity, Xdata.Cdata.storage, Mdata.psum, Xdata.Cdata.direct_throughfall); + const double interception = IntRate(intcapacity, Xdata.Cdata.storage, Mdata.psum, Xdata.Cdata.direct_throughfall, Xdata.Cdata.interception_timecoef); oldstorage = Xdata.Cdata.storage; Xdata.Cdata.storage += interception; // 1.4 compute the throughfall [mm timestep-1] (and update liquid fraction if SnowMIP) @@ -1621,11 +1634,11 @@ void Canopy::runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, double roug const double ground_liquid_precip = Mdata.psum * Mdata.psum_ph - liqmm_interception + liqmm_unload; Mdata.psum = ground_solid_precip + ground_liquid_precip; Mdata.psum_ph = (Mdata.psum>0)? ground_liquid_precip / Mdata.psum : 1.; - + if (Xdata.Cdata.storage>0.) { Xdata.Cdata.liquidfraction = std::max(0.0,std::min(1.0,(oldstorage*Xdata.Cdata.liquidfraction+liqmm_interception)/Xdata.Cdata.storage)); } - + // 2.1 prepare for canopy energy balance // Wetfraction update is moved to canopy energy balance loop - use old value first double wetfrac = Xdata.Cdata.wetfraction; @@ -1634,17 +1647,17 @@ void Canopy::runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, double roug //(could possibly be a function of interception - but is constant for the moment) //First, transmissivity of diffuse (and longwave) radiation const double epsilon = 1e-3; - double lai_frac_top = lai_frac_top_default;// fraction of the total lai attributed to the uppermost layer. If 1.,equivalent to 1-layer canopy. + double lai_frac_top = Xdata.Cdata.lai_frac_top_default;// fraction of the total lai attributed to the uppermost layer. 
If 1.,equivalent to 1-layer canopy. if ((lai_frac_top < epsilon)||(1-lai_frac_top Canopy::canopytemp_maxchange_perhour * M_TO_H(calculation_step_length)) { + if (fabs(Xdata.Cdata.temp - TC_OLD) > Xdata.Cdata.canopytemp_maxchange_perhour * M_TO_H(calculation_step_length)) { CanopyTurbulentExchange(Mdata, zref, z0m_ground, wetfrac, Xdata, ch_canopy, ce_canopy, ce_transpiration, ce_interception, ce_condensation); } @@ -1815,7 +1828,7 @@ void Canopy::runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, double roug // Now REDUCE WaterContent in the Soil Elements --- Could also be part of WaterTransport.c if (useSoilLayers) - SoilWaterUptake(Xdata.SoilNode, TRANSPIRATION, &Xdata.Edata[0]); + SoilWaterUptake(Xdata.SoilNode, TRANSPIRATION, &Xdata.Edata[0], Xdata.Cdata.wp_fraction, Xdata.Cdata.rootdepth, Xdata.Cdata.h_wilt); // final adjustment of interception storage due to evaporation Xdata.Cdata.storage = Xdata.Cdata.storage - INTEVAP; @@ -1837,7 +1850,7 @@ void Canopy::runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, double roug Xdata.Cdata.ilwrac += ilwrac; Xdata.Cdata.rlwrac += rlwrac; Xdata.Cdata.ilwrbc += ilwrbc; - Xdata.Cdata.rlwrbc += rlwrbc ; + Xdata.Cdata.rlwrbc += rlwrbc; // Net longwave and shortwave radiation of canopy [W m-2] Xdata.Cdata.rlnet += RNCANOPY-rsnet; @@ -1876,12 +1889,14 @@ void Canopy::runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, double roug Xdata.Cdata.intcapacity += intcapacity; Xdata.Cdata.canopyalb += canopyalb; const double albedo = (nE>Xdata.SoilNode)? 
Xdata.Albedo : Xdata.SoilAlb; - Xdata.Cdata.totalalb = TotalAlbedo(canopyalb, Xdata.Cdata.sigf, albedo, - Xdata.Cdata.direct_throughfall, canopyclosuredirect, radfracdirect, sigfdirect); + Xdata.Cdata.totalalb += TotalAlbedo(canopyalb, Xdata.Cdata.sigf, albedo, + Xdata.Cdata.direct_throughfall, canopyclosuredirect, radfracdirect, sigfdirect); // modifs for HeatMass and 2layercanopy: new fluxes, to be updated here for EB closure reasons - Xdata.Cdata.CondFluxCanop += HM0 + HM1 * Xdata.Cdata.temp; + Xdata.Cdata.CondFluxCanop += HM0 + HM1 * Xdata.Cdata.temp; if (Twolayercanopy) { - Xdata.Cdata.CondFluxTrunks += HMt0 + HMt1 * Xdata.Cdata.Ttrunk; - Xdata.Cdata.QStrunks += ht0 + ht1 * Xdata.Cdata.Ttrunk; + Xdata.Cdata.CondFluxTrunks += HMt0 + HMt1 * Xdata.Cdata.Ttrunk; + Xdata.Cdata.QStrunks += ht0 + ht1 * Xdata.Cdata.Ttrunk; } + + return true; } diff --git a/third_party/snowpack/snowpackCore/Canopy.h b/third_party/snowpack/snowpackCore/Canopy.h index 0ba405c4..61543b58 100644 --- a/third_party/snowpack/snowpackCore/Canopy.h +++ b/third_party/snowpack/snowpackCore/Canopy.h @@ -29,8 +29,8 @@ /** * @brief Computes interception of precipitation and radiation, and reduction of windspeed * in a canopy layer above thesnow or soil surface. - * This has been published in Gouttevin, I., M. Lehning, T. Jonas, D. Gustafsson, and Meelis Mölder, - * "A two-layer canopy model with thermal inertia for an improved snowpack energy balance below needleleaf forest + * This has been published in Gouttevin, I., M. Lehning, T. Jonas, D. Gustafsson, and Meelis Mölder, + * "A two-layer canopy model with thermal inertia for an improved snowpack energy balance below needleleaf forest * (model SNOWPACK, version 3.2. 1, revision 741).", Geoscientific Model Development 8.8, pp 2379-2398, 2015. * * @section canopy_modeling Canopy modeling @@ -65,14 +65,14 @@ * There is room for improvement ! 
* * @section canopy_comments Important comments: - * - Snowpack can take precipitation phase (relying on the psum_ph variable) for applications such as the - * SnowMIP experiments (Rutter et al., 2009). + * - Snowpack can take precipitation phase (relying on the psum_ph variable) for applications such as the + * SnowMIP experiments (Rutter et al., 2009). * - an additionnal parameter is now required in the input/station.snoold file : CanopyBasalArea (m2/m2), * to be placed after CanopyLeafAreaIndex. * - Some cleaning was done to suppressed outputs that can be easily derived from other outputs. * There is now space for outputs specific to the 2layer model, which are written if variant = 2L_CANOPY in * [SnowpackAdvanced] (Canopy::writeTimeSeriesAdd2LCanopy). - * + * */ class Canopy { @@ -80,114 +80,134 @@ class Canopy { public: Canopy(const SnowpackConfig& i_cfg); - static void DumpCanopyData(std::ofstream &fout, const CanopyData *Cdata, const SurfaceFluxes *Sdata, const double cos_sl); - void runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, double roughness_length, - double height_of_wind_val, const bool& adjust_VW_height=true); + static void DumpCanopyHeader(std::ofstream &fout); + static void DumpCanopyUnits(std::ofstream &fout); + static void DumpCanopyData(std::ofstream &fout, const CanopyData *Cdata, + const SurfaceFluxes *Sdata, const double cos_sl); + bool runCanopyModel(CurrentMeteo &Mdata, SnowStation &Xdata, + const double& roughness_length, const double& height_of_wind_val, + const bool& adjust_VW_height=true); static void writeTimeSeriesAdd2LCanopy(std::ofstream &fout, const CanopyData *Cdata); - static const double can_alb_dry, can_alb_wet, can_alb_snow, krnt_lai; //public physics + //static const double can_alb_dry, can_alb_wet, can_alb_snow, krnt_lai; //public constants private: - double get_f1(const double& ris); - double RootFraction(const double& zupper, const double& zlower); - void SoilWaterUptake(const size_t& SoilNode, const double& 
transpiration, ElementData* EMS); - double get_f4(const double& tempC); - double get_f2f4(const size_t& SoilNode, ElementData* EMS); - double get_f3(const double& vpd); - double IntCapacity(const CurrentMeteo& Mdata, const SnowStation& Xdata, const bool& force_rain=false) const; - double IntUnload(const double& capacity, const double& storage); - double IntRate(const double& capacity, const double& storage, const double& prec, - const double& direct); - - double CanopyAlbedo(const double& tair, const double& wetfrac); - double TotalAlbedo(double CanAlb, double sigf, double SurfAlb, double DirectThroughfall, + static double get_f1(const double& ris); + + static double RootFraction(const double& zupper, const double& zlower, const double rootdepth); + + void SoilWaterUptake(const size_t& SoilNode, const double& transpiration, + ElementData* EMS, const double wp_fraction, + const double rootdepth, const double h_wilt) const; + + static double get_f4(const double& tempC); + + static double get_f2f4(const size_t& SoilNode, ElementData* EMS, + const double wp_fraction,const double rootdepth); + + static double get_f3(const double& vpd, const double f3_gd); + + double IntCapacity(const CurrentMeteo& Mdata, const SnowStation& Xdata, + const bool& force_rain=false) const; + + static double IntUnload(const double& capacity, const double& storage); + + static double IntRate(const double& capacity, const double& storage, const double& prec, + const double& direct, const double interception_timecoef); + + static double CanopyAlbedo(const double& tair, const double& wetfrac, const SnowStation& Xdata); + + static double TotalAlbedo(double CanAlb, double sigf, double SurfAlb, double DirectThroughfall, double CanopyClosureDirect, double RadFracDirect, double sigfdirect); - double CanopyShadeSoilCover(const double& HEIGHT, const double& COVER, const double& ELEV); - double CanopyWetFraction(const double& capacity, const double& storage); - double CanopyTransmissivity(const double& 
lai, const double& elev); + static double CanopyShadeSoilCover(const double& HEIGHT, const double& COVER, const double& ELEV, const double& can_diameter); + + static double CanopyWetFraction(const double& capacity, const double& storage); + + static double CanopyTransmissivity(const double& lai, const double& elev, const double krnt_lai); void LineariseNetRadiation(const CurrentMeteo& Mdata,const CanopyData& Cdata, const SnowStation& Xdata, double& iswrac, double& rsnet, double& ilwrac, double& r0,double& r1, const double& canopyalb, double& CanopyClosureDirect, double& RadFracDirect, - const double& sigfdirect, double& r1p); + const double& sigfdirect, double& r1p) const; + void LineariseNetRadiation2L(const CurrentMeteo& Mdata, const CanopyData& Cdata, const SnowStation& Xdata, double& iswrac, double& rsnet, double& ilwrac, double& r0,double& r1, double& r2, double& rt0, double& rt1, double& rt2, const double& canopyalb, double& CanopyClosureDirect, double& RadFracDirect, - const double& sigfdirect, const double& sigftrunkdirect, double& r1p, double& r2p); - void LineariseSensibleHeatFlux(const double& ch_canopy, const double& tair, double& h0, double& h1, double scalingfactor); + const double& sigfdirect, const double& sigftrunkdirect, double& r1p, double& r2p) const; + + static void LineariseSensibleHeatFlux(const double& ch_canopy, const double& tair, double& h0, double& h1, double scalingfactor); - double DSaturationPressureDT(const double& L, const double& T); - void LineariseLatentHeatFlux(const double& ce_canopy, const double& tc_old, const double& vpair, + static double DSaturationPressureDT(const double& L, const double& T); + + static void LineariseLatentHeatFlux(const double& ce_canopy, const double& tc_old, const double& vpair, double& le0, double& le1, double scalingfactor); - void CalculateHeatMass(const double& height, const double& BasalArea, double& lai ,double& HMLeaves, double& HMTrunks); - void LineariseConductiveHeatFlux(const double& tc_old, 
const double& HM, double& HM0, double& HM1, const double& DT, const double& scalingfactor); + static void CalculateHeatMass(const double& height, const double& BasalArea, double& lai ,double& HMLeaves, + double& HMTrunks, const double biomass_density, const double biomass_heat_capacity); + + void LineariseConductiveHeatFlux(const double& tc_old, const double& HM, double& HM0, double& HM1, const double& DT, const double& scalingfactor) const; - void CanopyEnergyBalance(const double& h0, const double& h1, const double& le0, + static void CanopyEnergyBalance(const double& h0, const double& h1, const double& le0, const double& le1, const double& HM0, const double& HM1, const double& ce_canopy, const double& ce_condensation, double& r0, double& r1, double& TCANOPY, double& RNCANOPY, double& HCANOPY, double& LECANOPY); - void CanopyEnergyBalance2L(double& h0, double& h1, double& le0, + static void CanopyEnergyBalance2L(double& h0, double& h1, double& le0, double& le1, double& HM0, double& HM1, double& TT0, double& TT1, - const double& ce_canopy, + const double& ce_canopy, const double& ce_condensation, double& r0, double& r1, double& r2, double& TCANOPY, double& Ttrunk, double& RNCANOPY, double& HCANOPY, double& LECANOPY); - void CanopyEvaporationComponents(double& ce_canopy, - double& ce_transpiration, double& LECANOPY, - double& ta,double& I, double DT, + static void CanopyEvaporationComponents(const double& ce_canopy, + const double& ce_transpiration, double& LECANOPY, + const double& ta, const double& I, const double DT, double& CanopyEvaporation, double& INTEVAP, double& TRANSPIRATION, double& RNCANOPY, double& HCANOPY,double& TCANOPY, - double& r0, double& r1, double& h0, double& h1, + const double& r0, const double& r1, const double& h0, const double& h1, double& LECANOPYCORR, - double& wetfraction, double& HM0, double& HM1); - - void CanopyEvaporationComponents2L(double& ce_canopy, - double& ce_transpiration, double& LECANOPY, - double& ta, double& I, double DT, 
- double& CanopyEvaporation, - double& INTEVAP, double& TRANSPIRATION, - double& RNCANOPY, double& HCANOPY,double& TCANOPY, double& Ttrunk, - double& TT0, double& TT1, - double& r0, double& r1, double& r2, double& h0, double& h1, - double& LECANOPYCORR, - double& wetfraction, - double& HM0, double& HM1); - double get_psim(const double& xi); - double get_psih(const double& xi); - double RichardsonToAeta(double za, double TempAir, double DiffTemp, double Windspeed, double zom, double zoh, int maxitt); + const double& wetfraction, const double& HM0, const double& HM1); + + static void CanopyEvaporationComponents2L(const double& ce_canopy, + const double& ce_transpiration, double& LECANOPY, + const double& ta, const double& I, const double DT, + double& CanopyEvaporation, + double& INTEVAP, double& TRANSPIRATION, + double& RNCANOPY, double& HCANOPY,double& TCANOPY, double& Ttrunk, + const double& TT0, const double& TT1, + const double& r0, const double& r1, const double& r2, const double& h0, const double& h1, + double& LECANOPYCORR, + const double& wetfraction, + const double& HM0, const double& HM1); + + static double get_psim(const double& xi); + + static double get_psih(const double& xi); + + static double RichardsonToAeta(double za, double TempAir, double DiffTemp, double Windspeed, double zom, double zoh, int maxitt); void CanopyTurbulentExchange(const CurrentMeteo& Mdata, const double& refheight, const double& zomg, const double& wetfraction, SnowStation& Xdata, double& ch_canopy, double& ce_canopy, double& ce_transpiration, - double& ce_interception, double& ce_condensation); + double& ce_interception, double& ce_condensation) const; void CanopyRadiationOutput(SnowStation& Xdata, const CurrentMeteo& Mdata, double ac, double &iswrac, double &rswrac, double &iswrbc, double &rswrbc, double &ilwrac, double &rlwrac, double &ilwrbc, double &rlwrbc, - double CanopyClosureDirect, double RadFracDirect, double sigfdirect, double sigftrunkdirect); - - static const double 
int_cap_snow, int_cap_rain, interception_timecoef; - static const bool canopy_stabilitycorrection; - static const double can_diameter, roughmom_to_canopyheight_ratio, displ_to_canopyheight_ratio, raincrease_snow; - static const double canopytemp_maxchange_perhour, roughheat_to_roughmom_ratio, can_ch0, can_rs_mult, rsmin; - static const double f3_gd, rootdepth, wp_fraction; + double CanopyClosureDirect, double RadFracDirect, double sigfdirect, double sigftrunkdirect) const; std::string hn_density, hn_density_parameterization, variant, watertransportmodel_soil; double hn_density_fixedValue, calculation_step_length; bool useSoilLayers; // variables for canopy heat mass and 2-layer canopy bool CanopyHeatMass; - bool Twolayercanopy; + bool Twolayercanopy, Twolayercanopy_user; bool canopytransmission; bool forestfloor_alb; - static const double biomass_heat_capacity, biomass_density, lai_frac_top_default, trunk_frac_height, trunkalb, et; }; -#endif //END of Canopy.h +#endif diff --git a/third_party/snowpack/snowpackCore/Metamorphism.cc b/third_party/snowpack/snowpackCore/Metamorphism.cc index 3740d35a..7431dd89 100644 --- a/third_party/snowpack/snowpackCore/Metamorphism.cc +++ b/third_party/snowpack/snowpackCore/Metamorphism.cc @@ -20,9 +20,9 @@ #include //needed for size_t +#include "Metamorphism.h" #include "../Constants.h" #include "../Utils.h" -#include "Metamorphism.h" #include "Snowpack.h" using namespace std; @@ -56,12 +56,14 @@ using namespace mio; * - 3 Surface Hoar SH * - 4 Graupel PPgp * - 5 Not implemented yet --> thin crusts + * - 6 Technical Snow * - 7 Glacier ice * - 8 Ice layer IFil * - 9 Pure water on top of snowpack, soil, or road * - mk < 10, mk=mk+10 : first complete wetting * - mk < 20, mk=mk+10 : first melt-freeze cycle completed * - mk / 100 >= 1 : tagged snow layer + * - mk / 1000 >= 9 : marked reference level to reference height of wind and meteo values, as well as measured snow height * * SECONDARY micro-structure parameters computed by 
Metamorphism routine: * - N3 : coordination number (1) @@ -187,11 +189,11 @@ double Metamorphism::getCoordinationNumberN3(const double& Rho) return 1.75*(Rho/100.); // Decreases N3 to zero as density goes to zero. } - const double N_0 = 1.4153; - const double N_1 = 7.5580e-5; - const double N_2 = 5.1495e-5; - const double N_3 = 1.7345e-7; - const double N_4 = 1.8082e-10; + static const double N_0 = 1.4153; + static const double N_1 = 7.5580e-5; + static const double N_2 = 5.1495e-5; + static const double N_3 = 1.7345e-7; + static const double N_4 = 1.8082e-10; const double R_2 = Rho*Rho; const double R_3 = R_2*Rho; const double R_4 = R_2*R_2; @@ -224,17 +226,29 @@ double Metamorphism::ddRate(const ElementData& Edata) * non-static section * ************************************************************/ -Metamorphism::Metamorphism(const SnowpackConfig& cfg) - : metamorphism_model(), sn_dt(0.), new_snow_grain_size(0.) +static std::string get_model(const SnowpackConfig& cfg) +{ + std::string model; + cfg.getValue("METAMORPHISM_MODEL", "SnowpackAdvanced", model); + return model; +} + +static double get_sn_dt(const SnowpackConfig& cfg) { //Calculation time step in seconds as derived from CALCULATION_STEP_LENGTH const double calculation_step_length = cfg.get("CALCULATION_STEP_LENGTH", "Snowpack"); - sn_dt = M_TO_S(calculation_step_length); - - cfg.getValue("NEW_SNOW_GRAIN_SIZE", "SnowpackAdvanced", new_snow_grain_size); + return M_TO_S(calculation_step_length); +} - cfg.getValue("METAMORPHISM_MODEL", "SnowpackAdvanced", metamorphism_model); +static double get_nsgs(const SnowpackConfig& cfg) +{ + const double nsgs = cfg.get("NEW_SNOW_GRAIN_SIZE", "SnowpackAdvanced"); + return nsgs; +} +Metamorphism::Metamorphism(const SnowpackConfig& cfg) + : metamorphism_model( get_model(cfg) ), sn_dt( get_sn_dt(cfg) ), new_snow_grain_size( get_nsgs(cfg) ) +{ const map::const_iterator it1 = mapMetamorphismModel.find(metamorphism_model); if (it1 == mapMetamorphismModel.end()) throw 
InvalidArgumentException("Unknown metamorphism model: "+metamorphism_model, AT); @@ -250,7 +264,7 @@ Metamorphism::Metamorphism(const SnowpackConfig& cfg) * @param Edata * @return Rate of change (d-1) */ -double Metamorphism::spRateDEFAULT(const ElementData& Edata) +double Metamorphism::spRateDEFAULT(const ElementData& Edata) const { const double dTdZ = fabs(Edata.gradT); const double c = exp(-6000. / Edata.Te); // Original 6000. @@ -280,7 +294,7 @@ double Metamorphism::spRateDEFAULT(const ElementData& Edata) * @param *Edata * @return Rate of change (d-1) */ -double Metamorphism::spRateNIED(const ElementData& Edata) +double Metamorphism::spRateNIED(const ElementData& Edata) const { const double dTdZ = fabs(Edata.gradT); const double c = exp(-6000. / Edata.Te); // Original 6000. @@ -334,7 +348,7 @@ double Metamorphism::TGBondRate(const ElementData& Edata) * @param th_ice Volumetric ice content (1) * @return Lattice constant (mm) */ -double Metamorphism::LatticeConstant0(const double& th_ice) +double Metamorphism::LatticeConstant0(const double& th_ice) const { const double gsz0 = new_snow_grain_size; @@ -362,7 +376,7 @@ double Metamorphism::LatticeConstant0(const double& th_ice) * @return Grain radius growth rate (mm d-1) */ double Metamorphism::TGGrainRate(const ElementData& Edata, const double& Tbot, const double& Ttop, - const double& gradTSub, const double& gradTSup) + const double& gradTSub, const double& gradTSup) const { // Collect the continuum values from the element data structures const double th_i = Edata.theta[ICE]; // Ice content @@ -378,7 +392,7 @@ double Metamorphism::TGGrainRate(const ElementData& Edata, const double& Tbot, c double a = a0; if ( gsz > new_snow_grain_size ) { // Use an empirical estimation of the lattice constant - const double reg0 = 0.15, reg1 = -0.00048; // Empirical regression coefficients + static const double reg0 = 0.15, reg1 = -0.00048; // Empirical regression coefficients const double a1 = reg0 + reg1*(th_i * 
Constants::density_ice); a = a0 + a1*(gsz - new_snow_grain_size); } @@ -413,14 +427,14 @@ double Metamorphism::TGGrainRate(const ElementData& Edata, const double& Tbot, c double Metamorphism::ETBondRate(ElementData& Edata) { /* - * B_1...B_3 are physics computed with Brown's advanced and sophisticated + * B_1...B_3 are constants computed with Brown's advanced and sophisticated * mixture theory. Bartelt is so jealous of that fine piece of work. Please note * hist sarcastic tirade later in this unreadable program. */ - const double B_1 = 0.1436e-3; // in mm/sec - const double B_2 = -1.8850e-6; // in mm - const double B_3 = 4.6690e+3; // deg K - const double B_R = 273.; + static const double B_1 = 0.1436e-3; // in mm/sec + static const double B_2 = -1.8850e-6; // in mm + static const double B_3 = 4.6690e+3; // deg K + static const double B_R = 273.; const double rc = Edata.concaveNeckRadius(); double rbDot; // Bond radius growth rate (mm s-1) @@ -446,10 +460,10 @@ double Metamorphism::ETBondRate(ElementData& Edata) double Metamorphism::ETGrainRate(const ElementData& Edata) { // These are the routine's FUDGE FACTORs - const double C_1 = 9.403e-11; - const double C_2 = 5.860e-9; - const double C_3 = 2.900e3; - const double C_R = 273.; + static const double C_1 = 9.403e-11; + static const double C_2 = 5.860e-9; + static const double C_3 = 2.900e3; + static const double C_R = 273.; // Grain radius growth rate (mm s-1) const double rgDot = ((C_1 / Edata.rg) + C_2) * exp((C_3 / C_R) - (C_3 / Edata.Te)); @@ -473,7 +487,7 @@ double Metamorphism::PressureSintering(ElementData& Edata) if (Edata.theta[ICE] < Snowpack::min_ice_content) { return 0.; } - if (Edata.Te > Edata.melting_tk) { + if (Edata.Te > Edata.meltfreeze_tk) { return 0.; } @@ -490,16 +504,17 @@ double Metamorphism::PressureSintering(ElementData& Edata) * @param Mdata * @param Xdata */ -void Metamorphism::metamorphismDEFAULT(const CurrentMeteo& Mdata, SnowStation& Xdata) +void 
Metamorphism::metamorphismDEFAULT(const CurrentMeteo& Mdata, SnowStation& Xdata) const { double rgDot; // Grain growth rate (mm d-1) double rbDot; // Bond growth rate (mm d-1) double rgDotMax, rbDotMax; // Maximum grain and bond growth rates double ddDot; // Rate of dendricity change (d-1) double spDot; // Rate of sphericity change (d-1) - const double a1 = 1.11e-3, a2 = 3.65e-5; // mm3 day-1 Volumetric growth coefficients for wet snow - const double cw = 1.e8 * exp(-6000. / 273.15); + static const double a1 = 1.11e-3, a2 = 3.65e-5; // mm3 day-1 Volumetric growth coefficients for wet snow + static const double cw = 1.e8 * exp(-6000. / 273.15); const size_t nE = Xdata.getNumberOfElements(); + if (Xdata.getNumberOfElements() == 0) return; // Dereference the element pointer containing micro-structure data ElementData *EMS = &Xdata.Edata[0]; @@ -516,8 +531,8 @@ void Metamorphism::metamorphismDEFAULT(const CurrentMeteo& Mdata, SnowStation& X // Determine the coordination number which is purely a function of the density EMS[e].N3 = getCoordinationNumberN3(EMS[e].Rho); - // Compute local values - const double thetam_w = 1.e2 * (Constants::density_water * (EMS[e].theta[WATER]) / (EMS[e].Rho)); + // Compute local value of mass percentage of liquid water (Fig. 6 in Brun, 1989, https://doi.org/10.3189/S0260305500007576, shows the cut-off at 10%) + const double thetam_w = std::min(10., 1.e2 * (Constants::density_water * EMS[e].theta[WATER] / EMS[e].Rho)); // Constants used to limit changes in sphericity after faceting double splim1 = 20. * (new_snow_grain_size/2. - EMS[e].rg); @@ -528,7 +543,7 @@ void Metamorphism::metamorphismDEFAULT(const CurrentMeteo& Mdata, SnowStation& X if ( splim2 > 1.0 ) { splim2 = 1.0; } - const double splim3 = -0.7; + static const double splim3 = -0.7; const size_t marker = EMS[e].mk%100; // untag EMS[e].mk // Compute the pressure gradient (kinetic or equilibrium growth metamorphism??) 
@@ -684,7 +699,7 @@ void Metamorphism::metamorphismDEFAULT(const CurrentMeteo& Mdata, SnowStation& X EMS[e].mk += 2; // grains become fully rounded } // An ice layer forms in the snowpack for dry densities above 700 kg m-3! - if ((EMS[e].theta[ICE] > 0.763) && ((marker % 10 != 7) || (marker % 10 != 8))) { + if ((EMS[e].theta[ICE] > 0.763) && marker % 10 != 7 && marker % 10 != 8 ) { EMS[e].mk = (EMS[e].mk / 10) * 10 + 8; } } @@ -700,7 +715,7 @@ void Metamorphism::metamorphismDEFAULT(const CurrentMeteo& Mdata, SnowStation& X } } // Check for first complete melt-freeze cycle - else if ((marker < 20) && (marker >= 10) && (EMS[e].Te < EMS[e].melting_tk - 0.3)) { + else if ((marker < 20) && (marker >= 10) && (EMS[e].Te < EMS[e].meltfreeze_tk - 0.3)) { EMS[e].mk += 10; } @@ -710,19 +725,22 @@ void Metamorphism::metamorphismDEFAULT(const CurrentMeteo& Mdata, SnowStation& X /** * @brief Main routine for Metamorphism model adapted according to NIED (H. Hirashima) + * See: Hirashima H, Abe O, Sato A and Lehning M (2009) An adjustment for kinetic growth metamorphism + * to improve shear strength parameterization in the SNOWPACK model. Cold Reg. Sci. Technol., + * 59 (2-3), 169-177 (doi: 10.1016/j.coldregions.2009.05.001). * @param Mdata * @param Xdata */ -void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdata) +void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdata) const { double rgDot; // Grain growth rate (mm d-1) double rbDot; // Bond growth rate (mm d-1) double rgDotMax, rbDotMax; // Maximum grain and bond growth rates double ddDot; // Rate of dendricity change (d-1) double spDot; // Rate of sphericity change (d-1) - const double a1 = 1.11e-3, a2 = 3.65e-5; // mm3 day-1 Volumetric growth coefficients for wet snow - const double cw = 1.e8 * exp(-6000. / 273.15); - double dhfDot = Constants::undefined; //NIED (H. Hirashima) Depth hoar factor ... 
+ static const double a1 = 1.11e-3, a2 = 3.65e-5; // mm3 day-1 Volumetric growth coefficients for wet snow + static const double cw = 1.e8 * exp(-6000. / 273.15); + double dsmDot = Constants::undefined; //NIED (H. Hirashima) Dry snow metamorphism factor... const size_t nE = Xdata.getNumberOfElements(); // Dereference the element pointer containing micro-structure data @@ -740,8 +758,8 @@ void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdat // Determine the coordination number which is purely a function of the density EMS[e].N3 = getCoordinationNumberN3(EMS[e].Rho); - // Compute local values - const double thetam_w = 1.e2 * (Constants::density_water * EMS[e].theta[WATER] / EMS[e].Rho); + // Compute local value of mass percentage of liquid water (Fig. 6 in Brun, 1989, https://doi.org/10.3189/S0260305500007576, shows the cut-off at 10%) + const double thetam_w = std::min(10., 1.e2 * (Constants::density_water * EMS[e].theta[WATER] / EMS[e].Rho)); // Constants used to limit changes in sphericity after faceting double splim1 = 20. * (new_snow_grain_size/2. - EMS[e].rg); @@ -795,7 +813,7 @@ void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdat spDot = wind_slab * (CALL_MEMBER_FN(*this, mapSpRate[metamorphism_model])(EMS[e])); rgDot = 0.; rbDot = 0.5 * rgDotMax; - //dhfdot = 0.; //Fz HACK You'd need to define dhfdot in this case also + //dsmdot = 0.; //Fz HACK You'd need to define dsmdot in this case also } else { //normal processes for snow // NEW SNOW @@ -809,7 +827,7 @@ void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdat spDot = -0.5 * ddDot; rgDot = rbDot = 0.0; // no grain growth until dd <= 0.0 - dhfDot = -Optim::pow3(thetam_w)/16./86400.; //NIED (H. Hirashima) + dsmDot = -Optim::pow3(thetam_w)/16./86400.; //NIED (H. Hirashima) } else { // DRY new snow //NIED (H. 
Hirashima) ddDot = ddRate(EMS[e]); @@ -823,9 +841,13 @@ void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdat const double gradV=dPdZ*7.93E-4; //NIED (H. Hirashima) hPa/m��kg/m2�ɕϊ� const double DenFact = -0.136*EMS[e].Rho+4.56; const double Diffus = std::max((2.23E-5*(1013.25/1013.25)*pow((EMS[e].Te)/273.15,1.78)),((0.78*(EMS[e].Te-273.15))+10.84)*1.0E-5); //NIED (H. Hirashima) - dhfDot = fabs(-DenFact*Diffus*gradV*(1.0-EMS[e].dhf)); + dsmDot = fabs(-DenFact*Diffus*gradV*(1.0-EMS[e].dsm)); if (fabs(EMS[e].gradT)<5.0) { - dhfDot=-60000000.*exp(-6000./EMS[e].Te)/86400.; //NIED (H. Hirashima) + if (mio::IOUtils::K_TO_C(EMS[e].Te) <= -5) { + dsmDot= -(2.44E-9*mio::IOUtils::K_TO_C(EMS[e].Te)+6.58E-8); //NIED (H. Hirashima) + } else { + dsmDot= -(-8.96E-9*mio::IOUtils::K_TO_C(EMS[e].Te)+8.46E-9); //NIED (H. Hirashima) + } } } } else { // (OLD) SNOW @@ -843,9 +865,9 @@ void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdat if ( (marker%10 == 2) || EMS[e].sp > 0.5 ) { rgDot = 1. / (4. * Constants::pi * Optim::pow2(EMS[e].rg)) * (a1 + a2 * Optim::pow3(thetam_w)); rbDot = 0.6 * rgDot; - dhfDot = -(Optim::pow3(thetam_w)/16./86400.); - if ( dhfDot>-2.*cw/86400. ) { //NIED (H. Hirashima) - dhfDot=-2.*cw/86400.; + dsmDot = -(Optim::pow3(thetam_w)/16./86400.); + if ( dsmDot>-2.*cw/86400. ) { //NIED (H. Hirashima) + dsmDot=-2.*cw/86400.; } } else { rgDot = rbDot = 0.; @@ -857,9 +879,9 @@ void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdat const double gradV=dPdZ*7.93E-4; //NIED (H. Hirashima) //hPa/m��kg/m2�ɕϊ� const double DenFact = -0.136*EMS[e].Rho+4.56; //NIED (H. Hirashima) const double Diffus = std::max((2.23E-5*(1013.25/1013.25)*pow((EMS[e].Te)/273.15,1.78)),((0.78*(EMS[e].Te-273.15))+10.84)*1.0E-5); //NIED (H. 
Hirashima) - dhfDot = fabs(-DenFact*Diffus*gradV*(1.0-EMS[e].dhf)); + dsmDot = fabs(-DenFact*Diffus*gradV*(1.0-EMS[e].dsm)); if ( fabs(EMS[e].gradT)<5.0 ) { - dhfDot=-500000000.0*exp(-6000.0/EMS[e].Te)*(5.-fabs(EMS[e].gradT))/86400.; //NIED (H. Hirashima) + dsmDot=-500000000.0*exp(-6000.0/EMS[e].Te)*(5.-fabs(EMS[e].gradT))/86400.; //NIED (H. Hirashima) } if ( dPdZ > Metamorphism::mm_tg_dpdz ) { rbDot = TGBondRate( EMS[e] ); @@ -900,16 +922,16 @@ void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdat // UPDATE THE MICROSTRUCTURE PARAMETERS if(EMS[e].theta[WATER] > 0.01 ) { //NIED (H. Hirashima) - dhfDot = -(Optim::pow3(thetam_w)/16./86400.); - if(dhfDot>-2.*cw/86400.) { - dhfDot=-2.*cw/86400.; + dsmDot = -(Optim::pow3(thetam_w)/16./86400.); + if(dsmDot>-2.*cw/86400.) { + dsmDot=-2.*cw/86400.; } if (EMS[e].dd == 0.) { - dhfDot=dhfDot/2.; // HACK //Fz Hazardous comparison! + dsmDot=dsmDot/2.; // HACK //Fz Hazardous comparison! } } - EMS[e].dhf += dhfDot * sn_dt; //NIED (H. Hirashima) HACK //Fz use consistent units dDay instead of sn_dt - EMS[e].dhf = std::max(0.0, std::min(1.0, EMS[e].dhf)); //NIED (H. Hirashima) + EMS[e].dsm += dsmDot * sn_dt; //NIED (H. Hirashima) HACK //Fz use consistent units dDay instead of sn_dt + EMS[e].dsm = std::max(0.0, std::min(1.0, EMS[e].dsm)); //NIED (H. Hirashima) // Update dendricity EMS[e].dd += ddDot * dDay; EMS[e].dd = std::max(0.0, std::min(1.0, EMS[e].dd)); @@ -955,12 +977,12 @@ void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdat } // First wetting //NIED (H. Hirashima) - EMS[e].snowResidualWaterContent(); - if ((marker < 10) && (EMS[e].theta[WATER] > 0.99 * EMS[e].res_wat_cont) ) { + const double theta_residual = 0.024; // See: Yamaguchi, S., Watanabe, K., Katsushima, T., Sato, A., and Kumakura, T.: Dependence of the water retention curve of snow on snow characteristics, Ann. Glaciol., 53, 6-12, doi:10.3189/2012AoG61A001, 2012. 
+ if ((marker < 10) && (EMS[e].theta[WATER] > 0.99 * theta_residual)) { EMS[e].mk += 10; } // First melt-freeze cycle completed - else if ((marker < 20) && (marker >= 10) && (EMS[e].Te < EMS[e].melting_tk - 0.3)) { + else if ((marker < 20) && (marker >= 10) && (EMS[e].Te < EMS[e].meltfreeze_tk - 0.3)) { EMS[e].mk += 10; } @@ -968,7 +990,7 @@ void Metamorphism::metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdat } } -void Metamorphism::runMetamorphismModel(const CurrentMeteo& Mdata, SnowStation& Xdata) throw() +void Metamorphism::runMetamorphismModel(const CurrentMeteo& Mdata, SnowStation& Xdata) const throw() { CALL_MEMBER_FN(*this, mapMetamorphismModel[metamorphism_model])(Mdata, Xdata); } diff --git a/third_party/snowpack/snowpackCore/Metamorphism.h b/third_party/snowpack/snowpackCore/Metamorphism.h index e52e8988..762f52be 100644 --- a/third_party/snowpack/snowpackCore/Metamorphism.h +++ b/third_party/snowpack/snowpackCore/Metamorphism.h @@ -34,14 +34,14 @@ class SnowStation; class Metamorphism; -typedef void (Metamorphism::*MetaModelFn)(const CurrentMeteo&, SnowStation&); -typedef double (Metamorphism::*MetaSpRateFn)(const ElementData&); +typedef void (Metamorphism::*MetaModelFn)(const CurrentMeteo&, SnowStation&) const; +typedef double (Metamorphism::*MetaSpRateFn)(const ElementData&) const; class Metamorphism { public: Metamorphism(const SnowpackConfig& i_cfg); - void runMetamorphismModel(const CurrentMeteo& Mdata, SnowStation& Xdata) throw(); + void runMetamorphismModel(const CurrentMeteo& Mdata, SnowStation& Xdata) const throw(); static double csPoreArea(const ElementData& Edata); @@ -53,32 +53,32 @@ class Metamorphism { static const double max_grain_bond_ratio, wind_slab_enhance, wind_slab_vw, wind_slab_depth; private: - double TGBondRate(const ElementData& Edata); + static double TGBondRate(const ElementData& Edata); - double LatticeConstant0(const double& th_ice); + double LatticeConstant0(const double& th_ice) const; double TGGrainRate(const 
ElementData& Edata, const double& Tbot, const double& Ttop, - const double& gradTSub, const double& gradTSup); + const double& gradTSub, const double& gradTSup) const; - double ETBondRate(ElementData& Edata); - double ETGrainRate(const ElementData& Edata); + static double ETBondRate(ElementData& Edata); + static double ETGrainRate(const ElementData& Edata); - double PressureSintering(ElementData& Edata); + static double PressureSintering(ElementData& Edata); - void metamorphismDEFAULT(const CurrentMeteo& Mdata, SnowStation& Xdata); - void metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdata); + void metamorphismDEFAULT(const CurrentMeteo& Mdata, SnowStation& Xdata) const; + void metamorphismNIED(const CurrentMeteo& Mdata, SnowStation& Xdata) const; - double spRateDEFAULT(const ElementData& Edata); - double spRateNIED(const ElementData& Edata); + double spRateDEFAULT(const ElementData& Edata) const; + double spRateNIED(const ElementData& Edata) const; static const bool __init; /// mapMetamorphismModel; static std::map mapSpRate; - std::string metamorphism_model; - double sn_dt, new_snow_grain_size; + const std::string metamorphism_model; + const double sn_dt, new_snow_grain_size; }; #endif //End of Metamorphism.h diff --git a/third_party/snowpack/snowpackCore/PhaseChange.cc b/third_party/snowpack/snowpackCore/PhaseChange.cc index 40d33c3a..162bc0f8 100644 --- a/third_party/snowpack/snowpackCore/PhaseChange.cc +++ b/third_party/snowpack/snowpackCore/PhaseChange.cc @@ -19,6 +19,7 @@ */ #include "PhaseChange.h" +#include "ReSolver1d.h" #include "../Constants.h" #include "../Utils.h" @@ -35,7 +36,7 @@ using namespace std; * ALL water in the snowpack. This ensures that we can have DRY snow. (Perryanic comment!) */ const double PhaseChange::theta_r = 0.0; -const double PhaseChange::RE_theta_r = 1E-5/10; // Minimum amount of liquid water that will remain. 
It is recommended that this value is at least smaller than PhaseChange::RE_theta_threshold (see ReSolver1d.cc) +const double PhaseChange::RE_theta_r = 1E-5/10.; // Minimum amount of liquid water that will remain. It is recommended that this value is at least smaller than PhaseChange::RE_theta_threshold (see ReSolver1d.cc) const double PhaseChange::RE_theta_threshold = 1E-5; // Above this threshold, the element is considered in melting of freezing state. It is recommended that this value is REQUIRED_ACCURACY_THETA (see ReSolver1d.cc) //Saturated Water Content, for now we say 1.0 @@ -45,27 +46,44 @@ const double PhaseChange::theta_s = 1.0; * non-static section * ************************************************************/ +static bool get_bool(const SnowpackConfig& cfg, const std::string& key, const std::string& section) +{ + bool value; + cfg.getValue(key, section, value); + return value; +} + +static double get_double(const SnowpackConfig& cfg, const std::string& key, const std::string& section) +{ + double value; + cfg.getValue(key, section, value); + return value; +} + +static double get_sn_dt(const SnowpackConfig& cfg) +{ + //Calculation time step in seconds as derived from CALCULATION_STEP_LENGTH + const double calculation_step_length = cfg.get("CALCULATION_STEP_LENGTH", "Snowpack"); + return M_TO_S(calculation_step_length); +} + PhaseChange::PhaseChange(const SnowpackConfig& cfg) : iwatertransportmodel_snow(BUCKET), iwatertransportmodel_soil(BUCKET), watertransportmodel_snow("BUCKET"), watertransportmodel_soil("BUCKET"), - sn_dt(0.), cold_content_in(IOUtils::nodata), cold_content_soil_in(IOUtils::nodata), + sn_dt( get_sn_dt(cfg) ), cold_content_in(IOUtils::nodata), cold_content_soil_in(IOUtils::nodata), cold_content_out(IOUtils::nodata), cold_content_soil_out(IOUtils::nodata), - alpine3d(false), t_crazy_min(0.), t_crazy_max(0.) 
+ alpine3d( get_bool(cfg, "ALPINE3D", "SnowpackAdvanced") ), t_crazy_min( get_double(cfg, "T_CRAZY_MIN", "SnowpackAdvanced") ), t_crazy_max( get_double(cfg, "T_CRAZY_MAX", "SnowpackAdvanced") ), max_theta_ice(1.), enable_ice_reservoir(false) { - //Calculation time step in seconds as derived from CALCULATION_STEP_LENGTH - double calculation_step_length = cfg.get("CALCULATION_STEP_LENGTH", "Snowpack"); - sn_dt = M_TO_S(calculation_step_length); - - cfg.getValue("ALPINE3D", "SnowpackAdvanced", alpine3d); - //Water transport model snow cfg.getValue("WATERTRANSPORTMODEL_SNOW", "SnowpackAdvanced", watertransportmodel_snow); + max_theta_ice=1.; if (watertransportmodel_snow=="BUCKET") { iwatertransportmodel_snow=BUCKET; } else if (watertransportmodel_snow=="NIED") { iwatertransportmodel_snow=NIED; } else if (watertransportmodel_snow=="RICHARDSEQUATION") { iwatertransportmodel_snow=RICHARDSEQUATION; + max_theta_ice=ReSolver1d::max_theta_ice; } //Water transport model soil @@ -78,8 +96,14 @@ PhaseChange::PhaseChange(const SnowpackConfig& cfg) iwatertransportmodel_soil=RICHARDSEQUATION; } - cfg.getValue("T_CRAZY_MIN", "SnowpackAdvanced", t_crazy_min); - cfg.getValue("T_CRAZY_MAX", "SnowpackAdvanced", t_crazy_max); + // Check for ice reservoir + cfg.getValue("ICE_RESERVOIR", "SnowpackAdvanced", enable_ice_reservoir); + +} + +void PhaseChange::reset() +{ + cold_content_in = cold_content_soil_in = cold_content_out = cold_content_soil_out = IOUtils::nodata; } /** @@ -106,7 +130,7 @@ PhaseChange::PhaseChange(const SnowpackConfig& cfg) void PhaseChange::compSubSurfaceMelt(ElementData& Edata, const unsigned int nSolutes, const double& dt, double& ql_Rest, const mio::Date& date_in) { - const double T_melt=Edata.melting_tk; // Retrieve melting temperature from ElementData + const double T_melt=Edata.meltfreeze_tk; // Retrieve melting temperature from ElementData if(!Edata.checkVolContent()) prn_msg(__FILE__, __LINE__, "wrn", Date(), "wrong volumetric content"); /* @@ -118,7 +142,7 @@ 
void PhaseChange::compSubSurfaceMelt(ElementData& Edata, const unsigned int nSol || (Edata.theta[ICE] <= 0.0) || (Edata.theta[WATER] >= PhaseChange::theta_s)) { return; } else { - double dT = T_melt - Edata.Te; // Edata.melting_tk - Te > 0 + double dT = T_melt - Edata.Te; // Edata.meltfreeze_tk - Te > 0 // Now we take into account that there might be some extra energy that could not // be used by the element above because of complete melting dT -= ql_Rest / (Edata.c[TEMPERATURE] * Edata.Rho * Edata.L); @@ -162,35 +186,17 @@ void PhaseChange::compSubSurfaceMelt(ElementData& Edata, const unsigned int nSol } Edata.theta[ICE] += dth_i; Edata.theta[WATER] += dth_w; - Edata.theta[AIR] = std::max(0.0, 1.0 - Edata.theta[ICE] - Edata.theta[WATER] - Edata.theta[SOIL]); - // State when you have solid element - if ( Edata.theta[AIR] <= 0.0 ) { - Edata.theta[AIR] = 0.0; - } - // State when the ice content has disappeared (PERMAFROST) - if ( Edata.theta[ICE] <= 0.0 ) { - Edata.theta[ICE] = 0.0; - } - // State when the water content has disappeared (PERMAFROST) - if ( Edata.theta[WATER] <= 0.0 ) { - Edata.theta[WATER] = 0.0; - } - // State when the element is wet (PERMAFROST) - if ( Edata.theta[WATER] >= 1.0 ) { - Edata.theta[WATER] = 1.0; - } + Edata.theta[AIR] = (1. 
- Edata.theta[ICE] - Edata.theta[WATER] - Edata.theta[WATER_PREF] - Edata.theta[SOIL]); - // Make sure the sum of all volumetric contents is near 1 (Can make a 1% error) + // Make sure the sum of all volumetric contents is near 1, and take care of rounding errors if (!Edata.checkVolContent()) { prn_msg(__FILE__, __LINE__, "err", date_in, "Sum theta[I,W,A,S] > 1"); prn_msg(__FILE__, __LINE__, "msg-", Date(), - "Ice: %f, Water: %f, Air: %f Soil: %f", - Edata.theta[ICE], Edata.theta[WATER], Edata.theta[AIR], Edata.theta[SOIL]); + "Ice: %f, Water: %f, Water_pref: %f, Air: %f Soil: %f", + Edata.theta[ICE], Edata.theta[WATER], Edata.theta[WATER_PREF], Edata.theta[AIR], Edata.theta[SOIL]); throw IOException("In compSubSurfaceMelt!", AT); } - Edata.Rho = Constants::density_ice * Edata.theta[ICE] - + (Constants::density_water * Edata.theta[WATER] ) - + (Edata.theta[SOIL] * Edata.soil[SOIL_RHO]); + Edata.updDensity(); Edata.heatCapacity(); } } @@ -227,7 +233,7 @@ void PhaseChange::compSubSurfaceMelt(ElementData& Edata, const unsigned int nSol void PhaseChange::compSubSurfaceFrze(ElementData& Edata, const unsigned int nSolutes, const double& dt, const mio::Date& date_in) { - const double T_freeze=Edata.freezing_tk; // Retrieve melting temperature from ElementData + const double T_freeze=Edata.meltfreeze_tk; // Retrieve melting temperature from ElementData if(!Edata.checkVolContent()) prn_msg(__FILE__, __LINE__, "wrn", Date(), "wrong volumetric content"); /* @@ -240,23 +246,19 @@ void PhaseChange::compSubSurfaceFrze(ElementData& Edata, const unsigned int nSol } else { double dT = T_freeze - Edata.Te; // Adapt A to compute mass changes - double A = (Edata.c[TEMPERATURE] * Edata.Rho) / ( Constants::density_ice * Constants::lh_fusion ); + double A = (Edata.c[TEMPERATURE] * Edata.Rho) / ( Constants::density_ice * Constants::lh_fusion); // Compute the change in volumetric ice and water contents double dth_i = A * dT; double dth_w = - (Constants::density_ice / 
Constants::density_water) * dth_i; // Make sure that there is enough water to refreeze if ((Edata.theta[WATER] + dth_w) < cmp_theta_r) { - dth_w = - fabs( Edata.theta[WATER] - cmp_theta_r ); + dth_w = std::min(0., - ( Edata.theta[WATER] - cmp_theta_r )); dth_i = - (Constants::density_water / Constants::density_ice) * dth_w; - dT = dth_i / A; } // See if the element is pure ICE - if ((Edata.theta[ICE] + cmp_theta_r + dth_i + Edata.theta[SOIL]) >= 1.0) { - dth_w = - fabs( Edata.theta[WATER] - cmp_theta_r ); - dth_i = - (Constants::density_water / Constants::density_ice) * dth_w; - Edata.theta[WATER] = cmp_theta_r; - Edata.theta[ICE] = 1.0 - Edata.theta[SOIL] - Edata.theta[WATER]; - Edata.theta[AIR] = 0.0; + if (Edata.theta[ICE] + dth_i >= max_theta_ice) { + dth_i = std::max(0., max_theta_ice - Edata.theta[ICE]); + dth_w = - dth_i * (Constants::density_ice / Constants::density_water); } else { // Concentration of solutes for (unsigned int ii = 0; ii < nSolutes; ii++) { @@ -265,31 +267,45 @@ void PhaseChange::compSubSurfaceFrze(ElementData& Edata, const unsigned int nSol dth_i * Edata.conc[WATER][ii]) / ( Edata.theta[ICE] + dth_i); } } - Edata.theta[ICE] += dth_i; - Edata.theta[WATER] += dth_w; - Edata.theta[AIR] = std::max(0., 1.0 - Edata.theta[ICE] - Edata.theta[WATER] - Edata.theta[SOIL]); - } - // State when the element is wet (PERMAFROST) - if (Edata.theta[WATER] >= 1.0) { - prn_msg(__FILE__, __LINE__, "msg+", Date(), "Wet Element! (dth_w=%e) (compSubSurfaceFrze)", dth_w); - Edata.theta[WATER] = 1.0; } - // Make sure the sum of all volumetric contents is near 1 (Can make a 1% error) + Edata.theta[ICE] += dth_i; + Edata.theta[WATER] += dth_w; + Edata.theta[AIR] = (1. 
- Edata.theta[ICE] - Edata.theta[WATER] - Edata.theta[WATER_PREF] - Edata.theta[SOIL]); + + // Make sure the sum of all volumetric contents is near 1, and take care of rounding errors if (!Edata.checkVolContent()) { prn_msg(__FILE__, __LINE__, "err", date_in, "Sum theta[I,W,A,S] > 1"); prn_msg(__FILE__, __LINE__, "msg-", Date(), - "Ice: %f, Water: %f, Air: %f Soil: %f", - Edata.theta[ICE], Edata.theta[WATER], Edata.theta[AIR], Edata.theta[SOIL]); + "Ice: %f, Water: %f, Water_pref: %f, Air: %f Soil: %f", + Edata.theta[ICE], Edata.theta[WATER], Edata.theta[WATER_PREF], Edata.theta[AIR], Edata.theta[SOIL]); throw IOException("In compSubSurfaceFrze!", AT); } - Edata.Rho = Constants::density_ice * Edata.theta[ICE] + - (Constants::density_water * Edata.theta[WATER]) + - (Edata.theta[SOIL] * Edata.soil[SOIL_RHO]); + dT = dth_i / A; // Recalculate temperature change, as phase change may be limited + Edata.updDensity(); Edata.heatCapacity(); // Compute the volumetric refreezing power Edata.Qmf += (dth_i * Constants::density_ice * Constants::lh_fusion) / dt; // (W m-3) Edata.dth_w += dth_w; Edata.Te += dT; + + // Treat here the ice reservoir + if (enable_ice_reservoir) { + if (-dth_w >= Edata.theta_w_transfer) { // The water frozen comes from both PF transfer and matrix water + const double theta_i_transfer = (Constants::density_water / Constants::density_ice) * Edata.theta_w_transfer; // Volumetric content of ice formed, coming from the PF transfer + Edata.theta[ICE] -= theta_i_transfer; // Take away the PF ice from matrix ice + Edata.theta_i_reservoir += theta_i_transfer; // Add the PF ice to the ice reservoir + } else { // All water from PF transfer does not freeze + const double theta_i_transfer = (Constants::density_water / Constants::density_ice) * (-dth_w); // Volumetric content of ice formed, coming from the PF transfer + Edata.theta[ICE] -= theta_i_transfer; // Take away the PF ice from matrix ice + Edata.theta_i_reservoir += theta_i_transfer; // Add the PF ice to 
the ice reservoir + } + + // And check here for first wetting to set microstructural marker correctly, as not done in Resolver1d.cc + if ((Edata.theta[WATER] > 5E-6 * sn_dt) && (Edata.mk%100 < 10)) { + Edata.mk += 10; + } + + } } } @@ -298,6 +314,7 @@ void PhaseChange::compSubSurfaceFrze(ElementData& Edata, const unsigned int nSol void PhaseChange::initialize(SnowStation& Xdata) { + if (Xdata.getNumberOfElements() == 0) return; // Initialize PhaseChange: execute this function before doing any call to PhaseChange::compPhaseChange for the current time step, to reset the energy balance values. size_t e, nE; ElementData* EMS; @@ -330,6 +347,7 @@ void PhaseChange::finalize(const SurfaceFluxes& Sdata, SnowStation& Xdata, const double sum_Qmf=0.; cold_content_out=0.; cold_content_soil_out=0.; + if (Xdata.getNumberOfElements() == 0) return; ElementData* EMS; bool prn_CK = false; @@ -339,21 +357,21 @@ void PhaseChange::finalize(const SurfaceFluxes& Sdata, SnowStation& Xdata, const // In the final step compute temperature and temperature gradient, check both density and mass balance for (e = 0; e < nE; e++) { //Restructure temperature arrays - EMS[e].gradT = (NDS[e+1].T - NDS[e].T) / EMS[e].L; - EMS[e].Te = (NDS[e].T + NDS[e+1].T) / 2.0; - if (((EMS[e].Te - EMS[e].melting_tk) > 0.2) && EMS[e].theta[ICE]>0.) //handle the case of soil layers above ice/snow layers - prn_msg(__FILE__, __LINE__, "wrn", date_in, - "%s temperature Te=%f K is above melting point in element %d (nE=%d; T0=%f K, T1=%f K, theta_ice=%f)", - (e < Xdata.SoilNode) ? 
("Soil") : ("Snow"), EMS[e].Te, e, nE, NDS[e].T, NDS[e+1].T, EMS[e].theta[ICE]); - if (EMS[e].theta[SOIL] < Constants::eps2) { - if (!(EMS[e].Rho > Constants::eps && EMS[e].Rho <= Constants::max_rho)) { - prn_msg(__FILE__, __LINE__, "err", date_in, "Phase Change End: rho_snow[%d]=%f", e, EMS[e].Rho); - throw IOException("Run-time error in compPhaseChange()", AT); - } + EMS[e].gradT = (NDS[e+1].T - NDS[e].T) / EMS[e].L; + EMS[e].Te = (NDS[e].T + NDS[e+1].T) / 2.0; + //if (((EMS[e].Te - EMS[e].meltfreeze_tk) > 0.2) && EMS[e].theta[ICE]>0.) //handle the case of soil layers above ice/snow layers + // prn_msg(__FILE__, __LINE__, "wrn", date_in, + // "%s temperature Te=%f K is above melting point (%f K) in element %d (nE=%d; T0=%f K, T1=%f K, theta_ice=%f)", + // (e < Xdata.SoilNode) ? ("Soil") : ("Snow"), EMS[e].Te, EMS[e].meltfreeze_tk, e, nE, NDS[e].T, NDS[e+1].T, EMS[e].theta[ICE]); + // Verify element state against maximum possible density: only water + if (!(EMS[e].Rho > Constants::eps && EMS[e].Rho <= (1.-EMS[e].theta[SOIL])*Constants::density_water + (EMS[e].theta[SOIL] * EMS[e].soil[SOIL_RHO]))) { + prn_msg(__FILE__, __LINE__, "err", date_in, "Phase Change End: volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf wat_pref:%lf air:%le", + e, nE, EMS[e].Rho, EMS[e].theta[ICE], EMS[e].theta[WATER], EMS[e].theta[WATER_PREF], EMS[e].theta[AIR]); + throw IOException("Run-time error in compPhaseChange()", AT); } if (e>=Xdata.SoilNode) { // Snow element - cold_content_out += EMS[e].c[TEMPERATURE] * EMS[e].Rho * (EMS[e].Te - EMS[e].melting_tk) * EMS[e].L; + cold_content_out += EMS[e].c[TEMPERATURE] * EMS[e].Rho * (EMS[e].Te - EMS[e].meltfreeze_tk) * EMS[e].L; sum_Qmf += EMS[e].Qmf * EMS[e].L; } else { // Soil element @@ -414,24 +432,25 @@ double PhaseChange::compPhaseChange(SnowStation& Xdata, const mio::Date& date_in e = nE; while (e > 0) { e--; - if (EMS[e].theta[SOIL] == 0.0) { - if (verbose && !(EMS[e].Rho > 0. 
&& EMS[e].Rho <= Constants::max_rho)) { - prn_msg(__FILE__, __LINE__, "wrn", date_in, "Phase Change Begin: rho[%d]=%f", e, EMS[e].Rho); - } + // Verify element state against maximum possible density: only water + if (!(EMS[e].Rho > Constants::eps && EMS[e].Rho <= (1.-EMS[e].theta[SOIL])*Constants::density_water + (EMS[e].theta[SOIL] * EMS[e].soil[SOIL_RHO]))) { + prn_msg(__FILE__, __LINE__, "err", date_in, "Phase Change Begin: volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf wat_pref:%lf air:%le", + e, nE, EMS[e].Rho, EMS[e].theta[ICE], EMS[e].theta[WATER], EMS[e].theta[WATER_PREF], EMS[e].theta[AIR]); + throw IOException("Run-time error in compPhaseChange()", AT); } // and make sure the sum of all volumetric contents is near 1 (Can make a 1% error) if (verbose && !EMS[e].checkVolContent()) { prn_msg(__FILE__, __LINE__, "msg+", date_in, - "Phase Change Begin: Element=%d, nE=%d ICE %f, Water %f, Air %f Soil %f", - e, nE, EMS[e].theta[ICE], EMS[e].theta[WATER], EMS[e].theta[AIR], EMS[e].theta[SOIL]); + "Phase Change Begin: Element=%d, nE=%d ICE %f, Water %f, Water_pref %f, Air %f, Soil %f", + e, nE, EMS[e].theta[ICE], EMS[e].theta[WATER], EMS[e].theta[WATER_PREF], EMS[e].theta[AIR], EMS[e].theta[SOIL]); } double i_Te = EMS[e].Te; // Determine whether a layer can be considered dry or not. const double cmp_theta = ((iwatertransportmodel_snow==RICHARDSEQUATION && EMS[e].theta[SOIL]Constants::eps)) ? (PhaseChange::RE_theta_threshold) : (PhaseChange::theta_r); - const bool MoistLayer = (EMS[e].theta[WATER] > cmp_theta + Constants::eps) ? true : false; - if(MoistLayer==true && e==nE-1 && nE>Xdata.SoilNode) retTopNodeT=EMS[nE-1].melting_tk; + const bool MoistLayer = (EMS[e].theta[WATER] > cmp_theta + Constants::eps && EMS[e].theta[ICE] < max_theta_ice) ? 
true : false; + if(MoistLayer==true && e==nE-1 && nE>Xdata.SoilNode) retTopNodeT=EMS[nE-1].meltfreeze_tk; // Try melting try { @@ -450,7 +469,11 @@ double PhaseChange::compPhaseChange(SnowStation& Xdata, const mio::Date& date_in // Try freezing try { - if(!(iwatertransportmodel_soil==RICHARDSEQUATION && e= Xdata.SoilNode || iwatertransportmodel_soil != RICHARDSEQUATION ) { // Check if phase change did occur @@ -479,15 +502,15 @@ double PhaseChange::compPhaseChange(SnowStation& Xdata, const mio::Date& date_in NDS[e].T += (EMS[e].Te - i_Te); // Now check the nodal temperatures against the state of the element - if(EMS[e].theta[WATER] > cmp_theta + Constants::eps) { + if(EMS[e].theta[WATER] > cmp_theta + Constants::eps && EMS[e].theta[ICE] < max_theta_ice) { // If there is water, nodal temperatures cannot exceed melting temperature - NDS[e].T=std::max(NDS[e].T, EMS[e].melting_tk); - NDS[e+1].T=std::max(NDS[e+1].T, EMS[e].melting_tk); + NDS[e].T=std::max(NDS[e].T, EMS[e].meltfreeze_tk); + NDS[e+1].T=std::max(NDS[e+1].T, EMS[e].meltfreeze_tk); } if(EMS[e].theta[ICE] > Constants::eps) { // If there is ice, nodal temperatures cannot exceed freezing temperature - NDS[e].T=std::min(NDS[e].T, EMS[e].freezing_tk); - NDS[e+1].T=std::min(NDS[e+1].T, EMS[e].freezing_tk); + NDS[e].T=std::min(NDS[e].T, EMS[e].meltfreeze_tk); + NDS[e+1].T=std::min(NDS[e+1].T, EMS[e].meltfreeze_tk); } // We now bring the nodal temperatures in agreement with the element temperature @@ -495,8 +518,8 @@ double PhaseChange::compPhaseChange(SnowStation& Xdata, const mio::Date& date_in // If the element is in melting or freezing conditions (meaning that phase transition is incomplete due to limited energy available) // both nodes should be at melting or freezing temperature. We judge this by the presence of ice in melting conditions, or the presence of water in freezing conditions. 
- if(((EMS[e].theta[ICE] > Constants::eps && EMS[e].Te < i_Te) || (EMS[e].theta[WATER] > cmp_theta + Constants::eps && EMS[e].Te > i_Te))) { - NDS[e+1].T = NDS[e].T = (EMS[e].Te < i_Te) ? EMS[e].melting_tk : EMS[e].freezing_tk; + if(((EMS[e].theta[ICE] > Constants::eps && EMS[e].Te < i_Te) || (EMS[e].theta[WATER] > cmp_theta + Constants::eps && EMS[e].Te > i_Te)) && EMS[e].theta[ICE] < max_theta_ice) { + NDS[e+1].T = NDS[e].T = (EMS[e].Te < i_Te) ? EMS[e].meltfreeze_tk : EMS[e].meltfreeze_tk; } // Now that we adjust the nodal temperatures, correct the adjacent nodal temperatures, such that the energy content remains constant @@ -510,14 +533,14 @@ double PhaseChange::compPhaseChange(SnowStation& Xdata, const mio::Date& date_in // Now make the nodal temperatures reflect the state of the element they represent. Either the element is in phase transition (if-block), // or the phase transition is either complete or not occurring (else-block), - if((EMS[e].theta[ICE] > Constants::eps2 && EMS[e].Te < i_Te) || (EMS[e].theta[WATER] > cmp_theta + Constants::eps && EMS[e].Te > i_Te)) { + if(((EMS[e].theta[ICE] > Constants::eps2 && EMS[e].Te < i_Te) || (EMS[e].theta[WATER] > cmp_theta + Constants::eps && EMS[e].Te > i_Te)) && EMS[e].theta[ICE] < max_theta_ice) { // Ice present in melting conditions or water present in freezing conditions? Incomplete phase transition due to limited energy availability. // Backup current nodal temperatures, to reconstruct an energy conservative temperature array tmp_N_T_up[e] = NDS[e+1].T; // Make nodes in agreement with melting or freezing conditions - NDS[e].T = NDS[e+1].T = (EMS[e].Te < i_Te) ? EMS[e].melting_tk : EMS[e].freezing_tk; + NDS[e].T = NDS[e+1].T = (EMS[e].Te < i_Te) ? 
EMS[e].meltfreeze_tk : EMS[e].meltfreeze_tk; // Now that we adjust the nodal temperatures, correct the adjacent nodal temperatures, such that the energy content remains constant // First the node above @@ -540,7 +563,7 @@ double PhaseChange::compPhaseChange(SnowStation& Xdata, const mio::Date& date_in NDS[e].T=2.*EMS[e].Te-NDS[e+1].T; // Now check if we would have phase change in the element below. If so, we don't need to consider an energy conservative adjustment, because // the element will be treated in the next step. - if(e>0 && ((EMS[e-1].Te < EMS[e-1].freezing_tk && EMS[e-1].theta[WATER] <= cmp_theta) || (EMS[e-1].Te > EMS[e-1].melting_tk && EMS[e-1].theta[ICE] <= 0.))) { + if(e>0 && ((EMS[e-1].Te < EMS[e-1].meltfreeze_tk && EMS[e-1].theta[WATER] <= cmp_theta) || (EMS[e-1].Te > EMS[e-1].meltfreeze_tk && EMS[e-1].theta[ICE] <= 0.))) { // No phase change in element below, so adjust the adjacent nodes. if(e>1) { NDS[e-1].T+=(EMS[e-1].c[TEMPERATURE]*EMS[e-1].Rho*EMS[e-1].L)/(EMS[e-1].c[TEMPERATURE]*EMS[e-1].Rho*EMS[e-1].L + EMS[e-2].c[TEMPERATURE]*EMS[e-2].Rho*EMS[e-2].L)*(tmp_N_T_down[e]-NDS[e].T); @@ -552,22 +575,22 @@ double PhaseChange::compPhaseChange(SnowStation& Xdata, const mio::Date& date_in // Check the new nodal temperatures to make sure if(e cmp_theta + Constants::eps) { - NDS[e+2].T=std::max(NDS[e+2].T, EMS[e+1].melting_tk); + if(EMS[e+1].theta[WATER] > cmp_theta + Constants::eps && EMS[e+1].theta[ICE] < max_theta_ice) { + NDS[e+2].T=std::max(NDS[e+2].T, EMS[e+1].meltfreeze_tk); } if(EMS[e+1].theta[ICE] > Constants::eps) { - NDS[e+2].T=std::min(NDS[e+2].T, EMS[e+1].freezing_tk); + NDS[e+2].T=std::min(NDS[e+2].T, EMS[e+1].meltfreeze_tk); } } if(e>0) { - if(EMS[e-1].theta[WATER] > cmp_theta + Constants::eps) { - NDS[e-1].T=std::max(NDS[e-1].T, EMS[e-1].melting_tk); + if(EMS[e-1].theta[WATER] > cmp_theta + Constants::eps && EMS[e-1].theta[ICE] < max_theta_ice) { + NDS[e-1].T=std::max(NDS[e-1].T, EMS[e-1].meltfreeze_tk); } if(EMS[e-1].theta[ICE] > 
Constants::eps) { - NDS[e-1].T=std::min(NDS[e-1].T, EMS[e-1].freezing_tk); + NDS[e-1].T=std::min(NDS[e-1].T, EMS[e-1].meltfreeze_tk); } } - + // Recalculate the element temperature of the affected nodes EMS[e].Te=0.5*(NDS[e].T+NDS[e+1].T); if(e < nE-1) EMS[e+1].Te=0.5*(NDS[e+1].T+NDS[e+2].T); @@ -575,24 +598,21 @@ double PhaseChange::compPhaseChange(SnowStation& Xdata, const mio::Date& date_in } // TODO If WATER_LAYER && ql_rest > 0, consider evaporating water left in the last element above soil! } else { - if ( EMS[e].Te != i_Te && iwatertransportmodel_soil == RICHARDSEQUATION && e < Xdata.SoilNode ) { + if ( iwatertransportmodel_soil == RICHARDSEQUATION && e < Xdata.SoilNode ) { // In case we use Richards equation for soil and have recent phase changes (Te != i_Te), then, adjust nodes accordingly. if(e==nE-1) { NDS[e+1].T+=EMS[e].Te-i_Te; retTopNodeT=NDS[e+1].T; - } else if (e==Xdata.SoilNode-1) { - NDS[e+1].T=(EMS[e].Te + EMS[e+1].Te)/2.0; - } else { - NDS[e+1].T+=0.5*(EMS[e].Te-i_Te); } - NDS[e].T+=0.5*(EMS[e].Te-i_Te); + // Adjust lower node of element. A bit of an ad-hoc solution to only have the lower node absorb the energy change, but it's difficult to find a good, consistent solution here. + NDS[e].T+=(EMS[e].Te-i_Te); } else { // In case we use Richards equation for soil, phase changes will be calculated in ReSolver1d::SolveRichardsEquation // Nevertheless, we need to make sure to define the return value: if(e==nE-1 && nE==Xdata.SoilNode) { - if(EMS[e].theta[ICE] > Constants::eps) { + if(EMS[e].theta[ICE] > Constants::eps && EMS[e].theta[ICE] < max_theta_ice) { // When soil is freezing or thawing when using Richards Equation, we should return the melting temperature. 
- retTopNodeT=EMS[e].melting_tk; + retTopNodeT=EMS[e].meltfreeze_tk; } else { retTopNodeT=NDS[e+1].T; } @@ -604,22 +624,66 @@ double PhaseChange::compPhaseChange(SnowStation& Xdata, const mio::Date& date_in throw; } + // Calculate the cumulated ice reservoir, from the bottom to the top + if (enable_ice_reservoir) { + // First, reset the CIR to zero (recalculated every time step) + e = nE; + while (e > 0) { + e--; + EMS[e].theta_i_reservoir_cumul = 0.; + } + e = 0; // Initialize e at the bottom + bool CIR_to_fill = false; + size_t e_CIR = 0; + double reservoir_residual_ice = 0.; + size_t e_loc = 0; + while (e < nE) { // Until the top of the snowpack + if (CIR_to_fill) { // If there is a cumulated reservoir currently available to fill + if (EMS[e].theta_i_reservoir > 0.) { // If there is ice in the reservoir + EMS[e_CIR].theta_i_reservoir_cumul += EMS[e].theta_i_reservoir; // Ice of the reservoir added to the layer of the open cumulated ice reservoir + } else { + CIR_to_fill = false; + // Test now if the last cumulated ice reservoir is full + if ( (EMS[e_CIR].theta[ICE]+EMS[e_CIR].theta[WATER]+EMS[e_CIR].theta[WATER_PREF]+EMS[e_CIR].theta[SOIL]+EMS[e_CIR].theta_i_reservoir_cumul >= 0.763) && (EMS[e_CIR].theta_i_reservoir_cumul>0.) ) { // If the cumulated ice reservoir is full respectively to the void available in the matrix, leave a little bit of space + reservoir_residual_ice = EMS[e_CIR].theta[ICE]+EMS[e_CIR].theta[WATER]+EMS[e_CIR].theta[WATER_PREF]+EMS[e_CIR].theta[SOIL]+EMS[e_CIR].theta_i_reservoir_cumul - 0.99; // Residual ice that stays in the reservoir in order not to oversaturate the matrix + e_loc = e_CIR; + while (e_loc+1 < e) { + e_loc++; + EMS[e_loc].theta_i_reservoir = 0.; // Empty ice reservoirs + } + EMS[e_CIR].theta[ICE] += EMS[e_CIR].theta_i_reservoir_cumul - reservoir_residual_ice; // Transfer ice from the cumulated ice reservoir to the matrix at lowest layer + EMS[e_CIR].theta[AIR] = 1. 
- (EMS[e_CIR].theta[ICE]+EMS[e_CIR].theta[WATER]+EMS[e_CIR].theta[WATER_PREF]+EMS[e_CIR].theta[SOIL]); + EMS[e_CIR].theta_i_reservoir = reservoir_residual_ice; // Leave only potential residual ice in the reservoir + cout << "TRANSFER OF RESERVOIR ICE"; + } + } + } else { // Case CIR_to_fill==false + if (EMS[e].theta_i_reservoir > 0.) { + CIR_to_fill = true; // New cumulated ice reservoir to open here + e_CIR = e; + EMS[e_CIR].theta_i_reservoir_cumul += EMS[e].theta_i_reservoir; // Ice of the reservoir added to the cumulated reservoir + } + } + e++; + } + } + // Check surface node, in case TSS is above melting point, but the element itself is below melting point and consequently, phase changes did not occur. if (nE >= 1) { // Only check when there are elements. const double cmp_theta_r=((iwatertransportmodel_snow==RICHARDSEQUATION && EMS[nE-1].theta[SOIL]Constants::eps)) ? (PhaseChange::RE_theta_threshold) : (PhaseChange::theta_r); - if ((NDS[nE].T > EMS[nE-1].melting_tk && EMS[nE-1].theta[ICE] > Constants::eps) || (NDS[nE].T < EMS[nE-1].freezing_tk && EMS[nE-1].theta[WATER] > cmp_theta_r)) { + if ((NDS[nE].T > EMS[nE-1].meltfreeze_tk && EMS[nE-1].theta[ICE] > Constants::eps) || (NDS[nE].T < EMS[nE-1].meltfreeze_tk && EMS[nE-1].theta[WATER] > cmp_theta_r && EMS[nE-1].theta[ICE] < max_theta_ice)) { //In case the surface temperature is above the melting point of the upper element and it still consists of ice if(nE==1) { // If only 1 element is present, the bottom node is adjusted with the same amount as the upper node, so we don't alter the internal energy state of the element ... - NDS[nE-1].T+=(NDS[nE].T-EMS[nE-1].melting_tk); + NDS[nE-1].T+=(NDS[nE].T-EMS[nE-1].meltfreeze_tk); // ... and the top node is set to melting conditions - NDS[nE].T=EMS[nE-1].melting_tk; + NDS[nE].T=EMS[nE-1].meltfreeze_tk; } if(nE>1) { // If we have more than 1 element, we adjust the nE-1 node such that internal energy is conserved between element nE-1 and nE-2 ... 
- NDS[nE-1].T+=(EMS[nE-1].c[TEMPERATURE]*EMS[nE-1].Rho*EMS[nE-1].L)/(EMS[nE-1].c[TEMPERATURE]*EMS[nE-1].Rho*EMS[nE-1].L + EMS[nE-2].c[TEMPERATURE]*EMS[nE-2].Rho*EMS[nE-2].L)*(NDS[nE].T-EMS[nE-1].melting_tk); + NDS[nE-1].T+=(EMS[nE-1].c[TEMPERATURE]*EMS[nE-1].Rho*EMS[nE-1].L)/(EMS[nE-1].c[TEMPERATURE]*EMS[nE-1].Rho*EMS[nE-1].L + EMS[nE-2].c[TEMPERATURE]*EMS[nE-2].Rho*EMS[nE-2].L)*(NDS[nE].T-EMS[nE-1].meltfreeze_tk); // ... and the top node is set to melting conditions - NDS[nE].T=EMS[nE-1].melting_tk; + NDS[nE].T=EMS[nE-1].meltfreeze_tk; } } } diff --git a/third_party/snowpack/snowpackCore/PhaseChange.h b/third_party/snowpack/snowpackCore/PhaseChange.h index 88baf7e1..198dc5a5 100644 --- a/third_party/snowpack/snowpackCore/PhaseChange.h +++ b/third_party/snowpack/snowpackCore/PhaseChange.h @@ -33,6 +33,8 @@ class PhaseChange { public: PhaseChange(const SnowpackConfig& i_cfg); + + void reset(); void initialize(SnowStation& Xdata); //Call before first call to compPhaseChange in a time step void finalize(const SurfaceFluxes& Sdata, SnowStation& Xdata, const mio::Date& date_in); //Call after last call to compPhaseChange in a time step double compPhaseChange(SnowStation& Xdata, const mio::Date& date_in, const bool& verbose=true); //Call to do a phase change in a time step, returning the temperature of the top node (K) @@ -53,17 +55,20 @@ class PhaseChange { void compSubSurfaceFrze(ElementData& Edata, const unsigned int nSolutes, const double& dt, const mio::Date& date_in); - double sn_dt; ///< The calculation_step_length in seconds + const double sn_dt; ///< The calculation_step_length in seconds double cold_content_in; ///< cold content before first PhaseChange call (for checking energy balance) double cold_content_soil_in; ///< cold content before first PhaseChange call (for checking energy balance) double cold_content_out; ///< cold content after last PhaseChange call (for checking energy balance) double cold_content_soil_out; ///< cold content after last PhaseChange 
call (for checking energy balance) - bool alpine3d; ///< flag for alpine3d simulations + const bool alpine3d; ///< flag for alpine3d simulations double t_crazy_min, t_crazy_max;///< reasonable temperature bounds + double max_theta_ice; ///< maximum ice content of a layer, above which the presence of liquid water is allowed with sub-freezing temperatures - static const double theta_s; ///< Saturated Water Content, for now we say 1.0 + static const double theta_s; ///< Saturated Water Content, for now we say 1.0 + + bool enable_ice_reservoir; ///< Ice reservoir }; #endif diff --git a/third_party/snowpack/snowpackCore/ReSolver1d.cc b/third_party/snowpack/snowpackCore/ReSolver1d.cc index a9f794d7..16dde708 100644 --- a/third_party/snowpack/snowpackCore/ReSolver1d.cc +++ b/third_party/snowpack/snowpackCore/ReSolver1d.cc @@ -18,6 +18,8 @@ along with Snowpack. If not, see . */ #include "ReSolver1d.h" +#include "../vanGenuchten.h" +#include "SalinityTransport.h" #include "../Utils.h" #include "Snowpack.h" #ifdef CLAPACK @@ -53,19 +55,36 @@ using namespace mio; #pragma clang diagnostic ignored "-Wsign-conversion" #endif -ReSolver1d::ReSolver1d(const SnowpackConfig& cfg) +const double ReSolver1d::max_theta_ice = 0.99; //An ice pore space of around 5% is a reasonable value: K. M. Golden et al. The Percolation Phase Transition in Sea Ice, Science 282, 2238 (1998), doi: 10.1126/science.282.5397.2238 + +//Setting convergence criteria and numerical limits +const double ReSolver1d::REQUIRED_ACCURACY_H = 1E-5; //Required accuracy for the Richard solver: this is for the delta h convergence criterion +const double ReSolver1d::REQUIRED_ACCURACY_THETA = 1E-5; //Required accuracy for the Richard solver: this is for the delta theta convergence criterion. It is recommended to adjust PhaseChange::RE_theta_r in PhaseChanges.cc in case this value is changed. + //Huang et al. (1996) proposes 0.0001 here (=1E-4). 1E-4 causes some mass balance problems. Therefore, it is set to 1E-5. 
+const double ReSolver1d::convergencecriterionthreshold = 0.8; //Based on this value of theta_dim, either theta-based convergence is chosen, or h-based. Note we need to make this destinction, beacuse theta-based does not work close to saturation or with ponding. +const double ReSolver1d::MAX_ALLOWED_DELTA_H = 1E32; //Set an upper threshold for the delta_h[i] that is allowed. The idea is that when delta_h for an iteration is too large, we have a too large time step and a rewind is necessary. +const size_t ReSolver1d::INCR_ITER = 5; //Number of iterations for the Richard solver after which time step is increased. +const size_t ReSolver1d::DECR_ITER = 10; //Number of iterations for the Richard solver after which time step is decreased. +const size_t ReSolver1d::MAX_ITER = 15; //Maximum number of iterations for the Richard solver. +const double ReSolver1d::MIN_VAL_TIMESTEP = 1E-12; //Minimum time step allowed in Richards solver. Don't set this too low (let's say 1E-40), becuase the calculations are then done at the limits of the floating point precision. +const double ReSolver1d::MAX_VAL_TIMESTEP = 900.; //Maximum time step allowed in Richards solver. +const double ReSolver1d::MIN_DT_FOR_INFILTRATION=10.; //If dt is above this value, do a rewind if the matrix cannot allow for all infiltrating water +const size_t ReSolver1d::BS_MAX_ITER = 5000; //Maximum allowed number of iterations in the soil-freezing algorithm. +const double ReSolver1d::SF_epsilon = REQUIRED_ACCURACY_THETA; //Required accuracy for the root finding algorithm when solving soil freezing/thawing. 
+ + +ReSolver1d::ReSolver1d(const SnowpackConfig& cfg, const bool& matrix_part) : surfacefluxrate(0.), soilsurfacesourceflux(0.), variant(), iwatertransportmodel_snow(BUCKET), iwatertransportmodel_soil(BUCKET), - watertransportmodel_snow("BUCKET"), watertransportmodel_soil("BUCKET"), BottomBC(FREEDRAINAGE), K_AverageType(ARITHMETICMEAN), - sn_dt(IOUtils::nodata), useSoilLayers(false), water_layer(false) + watertransportmodel_snow("BUCKET"), watertransportmodel_soil("BUCKET"), BottomBC(FREEDRAINAGE), K_AverageType(ARITHMETICMEAN), K_frozen_soilType(IGNORE), omega(7.), + enable_pref_flow(false), pref_flow_param_th(0.), pref_flow_param_N(0.), pref_flow_param_heterogeneity_factor(1.), enable_ice_reservoir(false), runSoilInitializer(false), + sn_dt(IOUtils::nodata), allow_surface_ponding(false), matrix(false), SalinityTransportSolver(SalinityTransport::IMPLICIT), + dz(), z(), dz_up(), dz_down(), dz_() { cfg.getValue("VARIANT", "SnowpackAdvanced", variant); - // Defines whether soil layers are used - cfg.getValue("SNP_SOIL", "Snowpack", useSoilLayers); - - //To build a thin top rain-water layer over a thin top ice layer, rocks, roads etc. 
- cfg.getValue("WATER_LAYER", "SnowpackAdvanced", water_layer); + //Allow for water ponding on the surface in case of high infilitration fluxes + cfg.getValue("WATER_LAYER", "SnowpackAdvanced", allow_surface_ponding); //Calculation time step in seconds as derived from CALCULATION_STEP_LENGTH double calculation_step_length = cfg.get("CALCULATION_STEP_LENGTH", "Snowpack"); @@ -106,148 +125,96 @@ ReSolver1d::ReSolver1d(const SnowpackConfig& cfg) BottomBC=GRAVITATIONALDRAINAGE; } else if (tmp_lb_cond_waterflux=="SEEPAGE") { BottomBC=SEEPAGEBOUNDARY; + } else if (tmp_lb_cond_waterflux=="SEAICE") { + BottomBC=SEAICE; } - //Set averaging method for hydraulic conductivity at the layer interfaces - std::string tmp_avg_method_K; - cfg.getValue("AVG_METHOD_HYDRAULIC_CONDUCTIVITY", "SnowpackAdvanced", tmp_avg_method_K); - if (tmp_avg_method_K=="ARITHMETICMEAN") { - K_AverageType=ARITHMETICMEAN; - } else if (tmp_avg_method_K=="GEOMETRICMEAN") { - K_AverageType=GEOMETRICMEAN; - } else if (tmp_avg_method_K=="HARMONICMEAN") { - K_AverageType=HARMONICMEAN; - } else if (tmp_avg_method_K=="MINIMUMVALUE") { - K_AverageType=MINIMUMVALUE; - } else if (tmp_avg_method_K=="UPSTREAM") { - K_AverageType=UPSTREAM; - } else { - prn_msg( __FILE__, __LINE__, "err", Date(), "Unknown averaging method for hydraulic conductivity (key: AVG_METHOD_HYDRAULIC_CONDUCTIVITY)."); - throw; - } -} + //Check for preferential flow + cfg.getValue("PREF_FLOW", "SnowpackAdvanced", enable_pref_flow); + cfg.getValue("PREF_FLOW_PARAM_TH", "SnowpackAdvanced", pref_flow_param_th); + cfg.getValue("PREF_FLOW_PARAM_N", "SnowpackAdvanced", pref_flow_param_N); + cfg.getValue("PREF_FLOW_PARAM_HETEROGENEITY_FACTOR", "SnowpackAdvanced", pref_flow_param_heterogeneity_factor); + // Check for ice reservoir + cfg.getValue("ICE_RESERVOIR", "SnowpackAdvanced", enable_ice_reservoir); -/** - * @brief Calculating pressure head from water content \n - * The following function calculates the pressure head belonging to a given water 
content \n - * @author Nander Wever - * @param theta Water content (m^3/m^3) - * @param theta_r Residual water content (m^3/m^3) - * @param theta_s Saturated water content (m^3/m^3) - * @param alpha Van Genuchten parameter - * @param m Van Genuchten parameter - * @param n Van Genuchten parameter - * @param Sc Cut off value - * @param h_e Air entry pressure - * @param h_d Dry limit of pressure head - */ -double ReSolver1d::fromTHETAtoH(double theta, double theta_r, double theta_s, double alpha, double m, double n, double Sc, double h_e, double h_d) -{ - //Inverse of Van Genuchten (1980), Equation 21: - double returnvalue; - if (theta<=theta_r) { - returnvalue=h_d; + // Check if the enforceThermalEquilibrium() function needs to be run + cfg.getValue("REQ_INITIALIZE_SOIL", "SnowpackAdvanced", runSoilInitializer); + + //Set averaging method for hydraulic conductivity at the layer interfaces + std::string tmp_avg_method_K; + if(matrix_part) { + // Setting the values for matrix domain + cfg.getValue("AVG_METHOD_HYDRAULIC_CONDUCTIVITY", "SnowpackAdvanced", tmp_avg_method_K); + if (tmp_avg_method_K=="ARITHMETICMEAN") { + K_AverageType=ARITHMETICMEAN; + } else if (tmp_avg_method_K=="LOGMEAN") { + K_AverageType=LOGMEAN; + } else if (tmp_avg_method_K=="GEOMETRICMEAN") { + K_AverageType=GEOMETRICMEAN; + } else if (tmp_avg_method_K=="HARMONICMEAN") { + K_AverageType=HARMONICMEAN; + } else if (tmp_avg_method_K=="MINIMUMVALUE") { + K_AverageType=MINIMUMVALUE; + } else if (tmp_avg_method_K=="UPSTREAM") { + K_AverageType=UPSTREAM; + } else { + prn_msg( __FILE__, __LINE__, "err", Date(), "Unknown averaging method for hydraulic conductivity (key: AVG_METHOD_HYDRAULIC_CONDUCTIVITY)."); + throw; + } } else { - if (theta >= theta_s) { - returnvalue=h_e; + // Setting the values for preferential flow domain + cfg.getValue("AVG_METHOD_HYDRAULIC_CONDUCTIVITY_PREF_FLOW", "SnowpackAdvanced", tmp_avg_method_K); + if (tmp_avg_method_K=="ARITHMETICMEAN") { + K_AverageType=ARITHMETICMEAN; + } 
else if (tmp_avg_method_K=="LOGMEAN") { + K_AverageType=LOGMEAN; + } else if (tmp_avg_method_K=="GEOMETRICMEAN") { + K_AverageType=GEOMETRICMEAN; + } else if (tmp_avg_method_K=="HARMONICMEAN") { + K_AverageType=HARMONICMEAN; + } else if (tmp_avg_method_K=="MINIMUMVALUE") { + K_AverageType=MINIMUMVALUE; + } else if (tmp_avg_method_K=="UPSTREAM") { + K_AverageType=UPSTREAM; } else { - returnvalue=-1.*(1./alpha)*pow( (pow(Sc*((theta-theta_r)/(theta_s-theta_r)), (-1./m)) - 1.), (1./n)); + prn_msg( __FILE__, __LINE__, "err", Date(), "Unknown averaging method for hydraulic conductivity (key: AVG_METHOD_HYDRAULIC_CONDUCTIVITY_PREF_FLOW)."); + throw; } } - return returnvalue; -} - - -/** - * @brief Calculating pressure head from water content when ice is present \n - * The following function calculates the pressure head belonging to a given water content when ice is present \n - * @author Nander Wever - * @param theta Water content (m^3/m^3) - * @param theta_r Residual water content (m^3/m^3) - * @param theta_s Saturated water content (m^3/m^3) - * @param alpha Van Genuchten parameter - * @param m Van Genuchten parameter - * @param n Van Genuchten parameter - * @param Sc Cut off value - * @param h_e Air entry pressure - * @param h_d Dry limit of pressure head - * @param theta_i Ice content (m^3/m^3) - */ -double ReSolver1d::fromTHETAtoHforICE(double theta, double theta_r, double theta_s, double alpha, double m, double n, double Sc, double h_e, double h_d, double theta_i) -{ - //To have same return value as fromTHETAtoH, call this function with theta_i==0. 
- return fromTHETAtoH(theta+(theta_i*(Constants::density_ice/Constants::density_water)), theta_r, theta_s, alpha, m, n, Sc, h_e, h_d); -} - - -/** - * @brief Calculating volumetric water content from pressure head \n - * The following function calculates the volumetric water content belonging to a given pressure head \n - * @author Nander Wever - * @param h Pressure head (m) - * @param theta_r Residual water content (m^3/m^3) - * @param theta_s Saturated water content (m^3/m^3) - * @param alpha Van Genuchten parameter - * @param m Van Genuchten parameter - * @param n Van Genuchten parameter - * @param Sc Cut off value - * @param h_e Air entry pressure - * @param h_d Dry limit of pressure head - */ -double ReSolver1d::fromHtoTHETA(double h, double theta_r, double theta_s, double alpha, double m, double n, double Sc, double h_e) -{ - double returnvalue=0.; - //Van Genuchten (1980), Equation 21: - if (h>h_e) { //Saturation - returnvalue=theta_s; + std::string tmp_K_frozen_soil; + cfg.getValue("HYDRAULIC_CONDUCTIVITY_FROZEN_SOIL", "SnowpackAdvanced", tmp_K_frozen_soil); + if (tmp_K_frozen_soil=="IGNORE") { + K_frozen_soilType=IGNORE; + } else if (tmp_K_frozen_soil.substr(0,5)=="OMEGA") { + K_frozen_soilType=OMEGA; + omega=::atof(tmp_K_frozen_soil.substr(5).c_str()); + if (!(omega>0.) 
|| tmp_K_frozen_soil.length() == 5) { + prn_msg( __FILE__, __LINE__, "err", Date(), "When selecting hydraulic conductivity model for frozen soil OMEGA (key: HYDRAULIC_CONDUCTIVITY_FROZEN_SOIL), specify a positive value for omega following \"OMEGA\" (for example: \"OMEGA7\", which sets the recommended value of 7)."); + throw; + } + } else if (tmp_K_frozen_soil=="LIQUIDPORESPACE") { + K_frozen_soilType=LIQUIDPORESPACE; } else { - returnvalue=theta_r+( (theta_s-theta_r)*(1./Sc)*pow(1.+pow((alpha*fabs(h)),n),(-1.*m)) ); + prn_msg( __FILE__, __LINE__, "err", Date(), "Unknown hydraulic conductivity model for frozen soil (key: HYDRAULIC_CONDUCTIVITY_FROZEN_SOIL)."); + throw; } - return returnvalue; -} - - -/** - * @brief Calculating volumetric water content from pressure head when ice is present \n - * The following function calculates the volumetric water content belonging to a given pressure head when ice is present \n - * @author Nander Wever - * @param h Pressure head (m) - * @param theta_r Residual water content (m^3/m^3) - * @param theta_s Saturated water content (m^3/m^3) - * @param alpha Van Genuchten parameter - * @param m Van Genuchten parameter - * @param n Van Genuchten parameter - * @param Sc Cut off value - * @param h_e Air entry pressure - * @param h_d Dry limit of pressure head - * @param theta_i Ice content (m^3/m^3) - */ -double ReSolver1d::fromHtoTHETAforICE(double h, double theta_r, double theta_s, double alpha, double m, double n, double Sc, double h_e, double theta_i) -{ - //To have same return value as fromHtoTHETA, call this function with theta_i==0. 
- return fromHtoTHETA(h, theta_r, theta_s, alpha, m, n, Sc, h_e)-(theta_i*(Constants::density_ice/Constants::density_water)); -} + std::string tmp_SalinityTransportSolver = "EXPLICIT"; + cfg.getValue("SALINITYTRANSPORT_SOLVER", "SnowpackSeaice", tmp_SalinityTransportSolver, IOUtils::nothrow); + if (tmp_SalinityTransportSolver=="EXPLICIT") { + SalinityTransportSolver=SalinityTransport::EXPLICIT; + } else if (tmp_SalinityTransportSolver=="IMPLICIT") { + SalinityTransportSolver=SalinityTransport::IMPLICIT; + } else if (tmp_SalinityTransportSolver=="IMPLICIT2") { + SalinityTransportSolver=SalinityTransport::IMPLICIT2; + } else { + prn_msg( __FILE__, __LINE__, "err", Date(), "Unknown solver method for SalinityTransport (key: SALINITYTRANSPORT_SOLVER)."); + throw; + } -/** - * @brief Calculate air entry pressure head \n - * Air entry pressure head in [m] that corresponds to a maximum pore size (using Young-Laplace Equation).\n - * This is a required value for specifying water retention curves, see Ippisch et al. (2006).\n - * @author Nander Wever - * @param MaximumPoreSize Maximum pore size (diameter, not radius!) [m] - * @param Temperature Temperature for determining surface tension [K] - */ -double ReSolver1d::AirEntryPressureHead(double MaximumPoreSize, double Temperature) -{ - //Surface tension is dependent on the temperature. Most simulations will be in the temperature range of -20 - +20 degC. - //Source: http://en.wikipedia.org/wiki/Surface_tension - //Surface tension of water in N/m. - const double SurfaceTension = (Temperature > 293.)? 
0.07197 : 0.07564; //Value for 25 degC vs for 0 degC - const double delta_P=-1.*(2.*SurfaceTension)/(0.5*MaximumPoreSize); - const double air_entry_head=delta_P/(Constants::density_water*Constants::g); - - return air_entry_head; + //Assign if we solve matrix or prefential flow + matrix=matrix_part; } @@ -262,7 +229,7 @@ double ReSolver1d::AirEntryPressureHead(double MaximumPoreSize, double Temperatu * @param v right part * @param x the solution */ -int ReSolver1d::TDMASolver (int n, double *a, double *b, double *c, double *v, double *x) +int ReSolver1d::TDMASolver (size_t n, double *a, double *b, double *c, double *v, double *x) { // See: http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm // This solver is very rapid, but has the problem that when elements of the matrix get very small, relative to each other, precision problems propagate. @@ -275,21 +242,24 @@ int ReSolver1d::TDMASolver (int n, double *a, double *b, double *c, double *v, d * c - sup-diagonal (means it is the diagonal above the main diagonal) -- indexed from 0..n-2 * v - right part * x - the solution - * Return value: 0 = succes, otherwise: error + * Return value: 0 = succes, otherwise: error */ - if (b[n-1]==0.) return -1; // This will cause division by 0, so return with error code. + if (b[n-1] == 0.) return -1; // This will cause division by 0, so return with error code. - for (int i = 1; i < n; i++) { + size_t i; + for (i = 1; i < n; i++) { if (b[i-1]==0.) return -1; // This will cause division by 0, so return with error code. - double m = a[i-1]/b[i-1]; - b[i] = b[i] - m * c[i - 1]; - v[i] = v[i] - m*v[i-1]; + const double m = a[i-1] / b[i-1]; + b[i] = b[i] - m * c[i-1]; + v[i] = v[i] - m * v[i-1]; } - x[n-1] = v[n-1]/b[n-1]; + x[n-1] = v[n-1] / b[n-1]; - for (int i = n - 2; i >= 0; --i) + i = n - 1; + while (i-- > 0) { x[i] = (v[i] - c[i] * x[i+1]) / b[i]; + } return 0; } @@ -313,15 +283,15 @@ int ReSolver1d::pinv(int m, int n, int lda, double *a) // = 0: successful exit. 
// < 0: if INFO = -i, the i-th argument had an illegal value. // > 0: For DGESVD: DBDSQR did not converge, INFO specifies how many. Superdiagonals of an intermediate bidiagonal form B did not converge to zero. -// For DGESDD: DBDSDC did not converge, updating process failed. The algorithm failed to compute an singular value. The update process of divide and conquer failed. +// For DGESDD: DBDSDC did not converge, updating process failed. The algorithm failed to compute an singular value. The update process of divide and conquer failed. { //Switch for dgesvd/dgesdd - bool useOptimezedSVD=true; //True: dgesdd is used, which is faster, but requires more memory, compared to dgesvd. Note that when dgesdd failes, the function tries dgesvd. When that fails as well, the program is terminated. - //I encountered some numerical issues with dgesdd, depending on settings in feenable (likely over- or underflow). So setting this switch to false gives a safe mode. - //Note: there are some bugreports for dgesdd. + const bool useOptimezedSVD=true; //True: dgesdd is used, which is faster, but requires more memory, compared to dgesvd. Note that when dgesdd failes, the function tries dgesvd. When that fails as well, the program is terminated. + //I encountered some numerical issues with dgesdd, depending on settings in feenable (likely over- or underflow). So setting this switch to false gives a safe mode. + //Note: there are some bugreports for dgesdd. //1D-Array for singular values: - int nSV = m < n ? m : n; //nSV: number of singular values + const int nSV = m < n ? m : n; //nSV: number of singular values double *s = (double *)calloc(nSV * sizeof *s, sizeof(double)); //2D-Arrays for matrices U and Vt: @@ -391,13 +361,11 @@ int ReSolver1d::pinv(int m, int n, int lda, double *a) //Calculate pseudo-inverse: Step 1: U.S': for (i = m-1; i >= 0; i--) { for (j = n-1; j >= 0; j--) { - //dinv[j*m+i]=0.; //This line is not necessary anymore, as we reset array to zero where it is declared. 
- for (k = n-1; k >= 0; k--) { //Do matrix multiplications - if (j==k) { //j==k: this is because s is actually a diagonal matrix, and therefore, represented as a vector. - if(s[k]!=0.) { //NANDER TODO HACK: I'm not happy with this solution, but how to circumvent underflow? I just want 0. in that case. - dinv[j*m+i]+=(vt[i*n+k]*(double(1.)/s[k])); //Note: vt needs to be transposed AND is in FORTRAN matrix notation, therefore, the indices appear normal again. - } - } + k=j; //This works because s is a diagonal matrix + if(s[k]!=0.) { + dinv[j*m+i]+=(vt[i*n+k]*(double(1.)/s[k])); //Note: vt needs to be transposed AND is in FORTRAN matrix notation, therefore, the indices appear normal again. + } else { + return -1; } } } @@ -429,169 +397,269 @@ int ReSolver1d::pinv(int /*m*/, int /*n*/, int /*lda*/, double */*a*/) { /** - * @brief Set soil parameters for a given soil type \n - * Set soil parameters for a given soil type \n + * @brief Initializing the finite differences grid for solving Richards Equation \n + * The function fills vectors z, dz, dz_, dz_up and dz_down. * @author Nander Wever - * @param type Soil type - * @param theta_r Residual water content - * @param theta_soil Volumetric soil content - * @param alpha Van Genuchten parameter - * @param m Van Genuchten parameter - * @param n Van Genuchten parameter - * @param ksat Saturated hydraulic conductivity - * @param he Air entry pressure + * @param EMS ElementData structure + * @param lowernode The lower node of the domain for which Richards Equation is solved. The function assumes that lowernode is contained in EMS. + * @param uppernode The upper node of the domain for which Richards Equation is solved. The function assumes that uppernode is contained in EMS. 
*/ -void ReSolver1d::SetSoil(SoilTypes type, double *theta_r, double *theta_s, double *alpha, double *m, double *n, double *ksat, double *he) +void ReSolver1d::InitializeGrid(const vector& EMS, const size_t& lowernode, const size_t& uppernode) { - double MaximumPoreSize=0.; //Maximum pore size (diameter) in [m] - - //Set van Genuchten parameters - switch (type) { - case ORGANIC: - //Organic: Nemes (2001), Development of Soil Hydraulic Pedotransfer Functions on a European scale: Their Usefulness in the Assessment of Soil Quality. - *theta_r=0.01; - *theta_s=0.766; - *alpha=1.3; - *n=1.2039; - *ksat=8.000/(365.*24.*60.*60.); - MaximumPoreSize=0.005; - break; - - //ROSETTA Class Average Hydraulic Parameters: http://ars.usda.gov/Services/docs.htm?docid=8955 - case CLAY: - *theta_r=0.098; - *theta_s=0.459; - *n=1.253; - *alpha=1.496; - *ksat=0.14757/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case CLAYLOAM: - *theta_r=0.079; - *theta_s=0.442; - *n=1.416; - *alpha=1.581; - *ksat=0.0818/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case LOAM: - *theta_r=0.061; - *theta_s=0.399; - *alpha=1.11; - *n=1.47; - *ksat=0.02947/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case LOAMYSAND: - *theta_r=0.049; - *theta_s=0.39; - *n=1.746; - *alpha=3.475; - *ksat=1.052/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case SAND: - *theta_r=0.053; - *theta_s=0.375; - *n=3.177; - *alpha=3.524; - *ksat=6.427/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case SANDYCLAY: - *theta_r=0.117; - *theta_s=0.385; - *n=1.208; - *alpha=3.342; - *ksat=0.1135/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case SANDYCLAYLOAM: - *theta_r=0.063; - *theta_s=0.384; - *n=1.330; - *alpha=2.109; - *ksat=0.1318/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case SANDYLOAM: - *theta_r=0.039; - *theta_s=0.387; - *n=1.4488; - *alpha=2.667; - *ksat=0.3828/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case SILT: - *theta_r=0.050; - *theta_s=0.489; - *n=1.6788; - 
*alpha=0.6577; - *ksat=0.4375/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case SILTYCLAY: - *theta_r=0.111; - *theta_s=0.481; - *n=1.321; - *alpha=1.622; - *ksat=0.09616/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case SILTYCLAYLOAM: - *theta_r=0.090; - *theta_s=0.482; - *n=1.5205; - *alpha=0.8395; - *ksat=0.1112/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case SILTLOAM: - *theta_r=0.065; - *theta_s=0.439; - *n=1.6634; - *alpha=0.5058; - *ksat=0.1824/(24.*60.*60.); - MaximumPoreSize=0.005; - break; - - case WFJGRAVELSAND: //Gravel/sand - *theta_r=0.01; - *theta_s=0.35; - *n=4.5; - *alpha=3.5; - *ksat=0.000003171; //Equal to 100 m/year, for clean sand and silty sand, according to: http://web.ead.anl.gov/resrad/datacoll/conuct.htm - MaximumPoreSize=0.005; - break; + // Give vectors correct size + z.resize(uppernode+1); + dz.resize(uppernode+1); + dz_.resize(uppernode+1); + dz_up.resize(uppernode+1); + dz_down.resize(uppernode+1); + + // Initialize grid + double totalheight=0.; //tracking the total height of the column + size_t i, j; //layer indices + for (i = lowernode; i <= uppernode; i++) { + dz[i]=EMS[i].L; + totalheight+=dz[i]; + if(i==0) { //Lowest element + z[i]=.5*dz[i]; + } else { + z[i]=z[i-1]+(dz[i-1]/2.+(dz[i])/2.); + } } - *he=AirEntryPressureHead(MaximumPoreSize, 273.); - *m=(*n-1.)/(*n); + //Additional domain initialization: determine grid cell sizes, and node distances. + //See for additional details on finite differences scheme with varying grid cell size: Rathfelder (1994). + double tmpheight1=0., tmpheight2=0.; + for (j=lowernode; j<=uppernode; j++) { + //Distance to lower node + if(j!=lowernode) { + dz_down[j]=z[j]-z[j-1]; + } + tmpheight1+=dz_down[j]; + //Distance to upper node + if(j!=uppernode) { + dz_up[j]=z[j+1]-z[j]; + } + tmpheight2+=dz_up[j]; + //Mean distance + //dz_[j]=0.5*(dz_down[j]+dz_up[j]); //This is the definition of dz_ by Rathfelder (2004). However, it does not work, results in mass balance errors. 
+ dz_[j]=dz[j]; //This works. + } + dz_down[lowernode]=totalheight-tmpheight1; + dz_up[uppernode]=totalheight-tmpheight2; return; } +/** + * @brief Assemble the right hand side and assess the fluxes + * @author Nander Wever + * @param Takes many arguments, but in the future, many variables should become owned by the class. + */ +std::vector ReSolver1d::AssembleRHS( const size_t& lowernode, + const size_t& uppernode, + const std::vector& h_np1_m, + const std::vector& theta_n, + const std::vector& theta_np1_m, + const std::vector& theta_i_n, + const std::vector& theta_i_np1_m, + const std::vector& s, + const double& dt, + const std:: vector& rho, + const std::vector& k_np1_m_im12, + const std::vector& k_np1_m_ip12, + const BoundaryConditions aTopBC, + const double& TopFluxRate, + const BoundaryConditions aBottomBC, + const double& BottomFluxRate, + const SnowStation& Xdata, + SalinityTransport& Salinity, + const SalinityMixingModels& SALINITY_MIXING + ) +{ + size_t nE = (uppernode - lowernode) + 1; + std::vector term_up(nE, 0.); //Variable to support construction of the R.H.S. (R_mpfd in Celia et al., 1990). + std::vector term_down(nE, 0.); //Variable to support construction of the R.H.S. (R_mpfd in Celia et al., 1990). + std::vector term_up_crho(nE, 0.); //Variable to support construction of the R.H.S. (R_mpfd in Celia et al., 1990), assuming constant density. + std::vector term_down_crho(nE, 0.); //Variable to support construction of the R.H.S. (R_mpfd in Celia et al., 1990), assuming constant density. + + std::vector r_mpfd(nE, 0.); //Variable to support construction of the R.H.S. (R_mpfd in Celia et al., 1990). 
+ + for (size_t i = lowernode; i <= uppernode; i++) { //We loop over all Richards solver domain layers + // Calculate density related variables + double rho_up = 0; + double rho_down = 0; + double drho_up = 0; + double drho_down = 0; + if(i==uppernode) { + rho_up = rho[i]; //0.5 * (rho[i] + Constants::density_water); + drho_up = 0.; //(Constants::density_water - rho[i]) / dz_up[i]; + } else { + rho_up = 0.5 * (rho[i] + rho[i+1]); + drho_up = (rho[i+1] - rho[i]) / (dz_up[i]); + } + if(i==lowernode) { + rho_down = 0.5 * (rho[i] + Constants::density_water + ((Xdata.Seaice!=NULL)?(SeaIce::betaS * Xdata.Seaice->OceanSalinity):(0.))); + drho_down = (rho[i] - Constants::density_water + ((Xdata.Seaice!=NULL)?(SeaIce::betaS * Xdata.Seaice->OceanSalinity):(0.))) / dz_down[i]; + } else { + rho_down = 0.5 * (rho[i] + rho[i-1]); + drho_down = (rho[i] - rho[i-1]) / (dz_down[i]); + } + + //Determine R: + term_up[i]=0.; + term_down[i]=0.; + term_up_crho[i]=0.; + term_down_crho[i]=0.; + + //Fill R.H.S. vector + //Note: the gravity term is not explicitly in Celia et al (1990). It is just z[i], as pressure head should already be scaled by rho_water * g. Then it is taken outside the nabla, by using the chain rule. + if(i==uppernode) { + if(aTopBC==NEUMANN) { //Neumann, following Equation 4 in McCord, WRR (1991). 
+ term_up[i]=(TopFluxRate)*dz_up[i] - Xdata.cos_sl*(dz_up[i]*k_np1_m_ip12[i]) - Xdata.cos_sl*(dz_up[i]*(k_np1_m_ip12[i]/rho_up)*(z[i]+0.5*(dz_up[i]))) * (drho_up); + term_up_crho[i]=(TopFluxRate)*dz_up[i] - Xdata.cos_sl*(dz_up[i]*k_np1_m_ip12[i]); + } else { //Dirichlet + term_up[i]=0.; + term_up_crho[i]=0.; + std::cout << "DIRICHLET NOT CORRECTLY IMPLEMENTED.\n"; + throw; + } + } else { + term_up[i]=(k_np1_m_ip12[i]/rho_up)*(h_np1_m[i+1]*rho[i+1]-h_np1_m[i]*rho[i]); + term_up_crho[i]=k_np1_m_ip12[i]*(h_np1_m[i+1]-h_np1_m[i]); + //term_up_crho[i]=(k_np1_m_ip12[i]/rho_up)*(h_np1_m[i+1]*rho_up-h_np1_m[i]*rho_up); + } + if(i==lowernode) { + if(aBottomBC == NEUMANN) { //Neumann, following Equation 4 in McCord, WRR (1991). + term_down[i]=(BottomFluxRate)*dz_down[i] - Xdata.cos_sl*(dz_down[i]*k_np1_m_im12[i]) - Xdata.cos_sl*(dz_down[i]*(k_np1_m_im12[i]/rho_down)*(z[i]-0.5*(dz_down[i]))) * (drho_down); + term_down_crho[i]=term_down[i] + Xdata.cos_sl*(dz_down[i]*(k_np1_m_im12[i]/rho_down)*(z[i]-0.5*(dz_down[i]))) * (drho_down); + } else { //Dirichlet (r_mpfd[lowernode] should equal 0.) + term_down[i]=(term_up[i]/dz_up[i])*dz_down[i]; + term_down[i]+= + (+ Xdata.cos_sl*((k_np1_m_ip12[i]-k_np1_m_im12[i])/(dz_[i])) + + Xdata.cos_sl*(((k_np1_m_ip12[i]/rho_up)*(z[i]+0.5*(dz_up[i])) * (drho_up) - (k_np1_m_im12[i]/rho_down)*(z[i]-0.5*(dz_down[i])) * (drho_down)) / (dz_[i])) + - (1./dt)*((theta_np1_m[i]-theta_n[i]) + (theta_i_np1_m[i]-theta_i_n[i])*(Constants::density_ice/Constants::density_water)) + + s[i]) * dz_[i] * dz_down[i]; + term_down_crho[i]=term_down[i] + Xdata.cos_sl*(dz_down[i]*(k_np1_m_im12[i]/rho_down)*(z[i]-0.5*(dz_down[i]))) * (drho_down); + } + } else { + term_down[i]=(k_np1_m_im12[i]/rho_down)*(h_np1_m[i]*rho[i] - h_np1_m[i-1]*rho[i-1]); + term_down_crho[i]=k_np1_m_im12[i]*(h_np1_m[i] - h_np1_m[i-1]); + //term_down_crho[i]=(k_np1_m_im12[i]/rho_down)*(h_np1_m[i]*rho_down - h_np1_m[i-1]*rho_down); + } + + //RHS eq. 17 in Celia et al. 
(1990): + r_mpfd[i]=(1./(dz_[i]))*((term_up[i]/dz_up[i])-(term_down[i]/dz_down[i])) + + Xdata.cos_sl*((k_np1_m_ip12[i]-k_np1_m_im12[i])/(dz_[i])) + + Xdata.cos_sl*(((k_np1_m_ip12[i]/rho_up)*(z[i]+0.5*(dz_up[i])) * (drho_up) - (k_np1_m_im12[i]/rho_down)*(z[i]-0.5*(dz_down[i])) * (drho_down)) / (dz_[i])) + - (1./dt)*((theta_np1_m[i]-theta_n[i]) + (theta_i_np1_m[i]-theta_i_n[i])*(Constants::density_ice/Constants::density_water)) + + s[i]; + + //Determine individual fluxes (defined as: negative = upward, positive = downward): + Salinity.dz_[i] = dz_[i]; + Salinity.dz_up[i] = dz_up[i]; + Salinity.dz_down[i] = dz_down[i]; + Salinity.theta1[i] = theta_n[i]; + Salinity.D[i] = 1E-10; + switch (SALINITY_MIXING) { + case NONE: + { + //No mixing: salt fluxes only based on net fluxes (1) and second term (2) is zero. + Salinity.flux_up[i] = term_up[i]/dz_up[i] + + Xdata.cos_sl*(k_np1_m_ip12[i]) + + Xdata.cos_sl*((k_np1_m_ip12[i]/rho_up)*(z[i]+0.5*(dz_up[i]))) * (drho_up); + Salinity.flux_up_2[i] = 0.; //Salinity.flux_up[i] - term_up_crho[i]/dz_up[i] - Xdata.cos_sl*(k_np1_m_ip12[i]); + + Salinity.flux_down[i] = term_down[i]/dz_down[i] + + Xdata.cos_sl*(k_np1_m_im12[i]) + + Xdata.cos_sl*((k_np1_m_im12[i]/rho_down)*(z[i]-0.5*(dz_down[i]))) * (drho_down); + Salinity.flux_down_2[i] = 0.; //Salinity.flux_down[i] - term_down_crho[i]/dz_down[i] - Xdata.cos_sl*(k_np1_m_im12[i]); + break; + } + case CAPILLARY_GRAVITY: + { + //Separate capillary (1) and gravity term (2) + Salinity.flux_up[i] = term_up[i]/dz_up[i]; // Capillary term + Salinity.flux_up_2[i] = // Gravity term + + Xdata.cos_sl*(k_np1_m_ip12[i]) + + Xdata.cos_sl*((k_np1_m_ip12[i]/rho_up)*(z[i]+0.5*(dz_up[i]))) * (drho_up); + + Salinity.flux_down[i] = term_down[i]/dz_down[i]; // Capillary term + Salinity.flux_down_2[i] = // Gravity term + + Xdata.cos_sl*(k_np1_m_im12[i]) + + Xdata.cos_sl*((k_np1_m_im12[i]/rho_down)*(z[i]-0.5*(dz_down[i]))) * (drho_down); + break; + } + case DENSITY_DIFFERENCE: + { + //Separate term dependent on 
density (1) and term constant density (2) + Salinity.flux_up[i] = term_up_crho[i]/dz_up[i] + + Xdata.cos_sl*k_np1_m_ip12[i]; + Salinity.flux_up_2[i] = (term_up[i]/dz_up[i] - term_up_crho[i]/dz_up[i]) + + Xdata.cos_sl*((k_np1_m_ip12[i]/rho_up)*(z[i]+0.5*(dz_up[i]))) * (drho_up); + + Salinity.flux_down[i] = term_down_crho[i]/dz_down[i] + + Xdata.cos_sl*k_np1_m_im12[i]; + Salinity.flux_down_2[i] = (term_down[i]/dz_down[i] - term_down_crho[i]/dz_down[i]) + + Xdata.cos_sl*((k_np1_m_im12[i]/rho_down)*(z[i]-0.5*(dz_down[i]))) * (drho_down); + break; + } + case DENSITY_GRAVITY: + { + //Separate term constant density (1) and term dependent on density (2) + Salinity.flux_up[i] = term_up[i]/dz_up[i] + Xdata.cos_sl*(k_np1_m_ip12[i]*rho[i]); + Salinity.flux_up_2[i] = + + Xdata.cos_sl*(k_np1_m_ip12[i]*(rho_up-rho[i])) + + Xdata.cos_sl*(k_np1_m_ip12[i]*(z[i]+0.5*(dz_[i]))-k_np1_m_im12[i]*(z[i]-0.5*(dz_[i]))) * (drho_up); + + Salinity.flux_down[i] = term_down[i]/dz_down[i] + Xdata.cos_sl*(k_np1_m_im12[i]*rho[i]); + Salinity.flux_down_2[i] = + + Xdata.cos_sl*(k_np1_m_im12[i]*(rho_down-rho[i])) + + Xdata.cos_sl*(k_np1_m_ip12[i]*(z[i]+0.5*(dz_[i]))-k_np1_m_im12[i]*(z[i]-0.5*(dz_[i]))) * (drho_down); + break; + } + } + if(i==uppernode) { + Salinity.flux_up[uppernode]+=Salinity.flux_up_2[uppernode]; + Salinity.flux_up_2[uppernode]=0.; + } + if(i==lowernode) { + Salinity.flux_down[lowernode]+=Salinity.flux_down_2[lowernode]; + Salinity.flux_down_2[lowernode]=0.; + } + } + + + // r_mpfd is an approximation of how far one is away from the solution. 
So in case of Dirichlet boundaries, we are *at* the solution: + if(aTopBC==DIRICHLET) r_mpfd[uppernode]=0.; + if(aBottomBC==DIRICHLET) r_mpfd[lowernode]=0.; + + + // return the right hand side vector + return r_mpfd; +} + + + /** * @brief Solve Richards Equation \n * Solve Richards Equation \n * @author Nander Wever - * @param Xdata - * @param Sdata + * @param Xdata SnowStation object for which Richards Equation should be solved + * @param Sdata SurfaceFluxes object to store fluxes + * @param ql latent heat flux that should be considered as evaporation in the upper boundary condition + * @param date Date for wich the time step is executed, to provide a helpful error message when problems occur */ -void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) +void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata, double& ql, const mio::Date& date) { +// Main publications about the development of this code: +// - Wever, N., Fierz, C., Mitterer, C., Hirashima, H., and Lehning, M.: Solving Richards Equation for snow improves snowpack meltwater runoff estimations in detailed multi-layer snowpack model, The Cryosphere, 8, 257-274, doi:10.5194/tc-8-257-2014, 2014. +// First publication describing the implementation of Richards equation in SNOWPACK. +// - Wever, N., Schmid, L., Heilig, A., Eisen, O., Fierz, C., and Lehning, M.: Verification of the multi-layer SNOWPACK model with different water transport schemes, The Cryosphere, 9, 2271-2293, doi:10.5194/tc-9-2271-2015, 2015. +// In-depth model verification and description of the soil part with Richards equation (soil properties and soil phase changes). +// - Wever, N., Würzer, S., Fierz, C., and Lehning, M.: Simulating ice layer formation under the presence of preferential flow in layered snowpacks, The Cryosphere, 10, 2731-2744, doi:10.5194/tc-10-2731-2016, 2016. +// Describes the preferential flow implementation via the dual-domain approach. 
+// // Main references used to write this code, as a reference to understand the working of this code: // - Celia, M, A., Bouloutas, E.T., Zabra, R.L. (1990) A general mass-conservative numerical solution for the unsaturated flow equation Water Resources Research (26:7), 1483-1496. // Describes the main part of the solver (Picard iteration of the mixed form of the Richards equation, see eq. 17 in that paper) @@ -623,74 +691,48 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) // - Rathfelder, K and Abriola, L. (1994) Mass conservative numerical solutions of the head-based Richards equation. Water Resources Research (30:9) 2579-2586. // Describes an implementation of variable grid spacing for solving Richards Equation in 1D. - -// PROBLEM SOLVER GUIDE: // KNOWN ISSUES: -// - When using Richars-Equation, the new energy conservative PhaseChange-schemes may cause snow temperatures to be above 273.15K. As long as they are transient, it should not considered -// to be a problem. Future optimization here may be possible. It's likely related to the fact that when solving Richards-Equation, basically every snow layer has some amount of water in it, -// albeit very tiny. But this causes some difficulties in determining whether snow is wet or dry, so whether the nodes are at melting temperature. +// - When using Richars-Equation, the PhaseChange-scheme may cause snow temperatures to be above 273.15K. As long as they are transient, it should not considered +// to be a problem. Future optimization here may be possible. It's likely related to the fact that when solving Richards Equation, basically every snow layer has some amount +// of water in it, albeit very little. But this causes some difficulties in determining whether snow is wet or dry, so whether the nodes are at melting temperature. // - In case of floating point exceptions: ReSolver1d has some problems when (in CMake) DEBUG_ARITHM is set to ON. 
You can safely set it to OFF, as the code detects for // illegal operations itself and takes appropriate measures, like choosing another solver or reducing the time step. // - In case of non-convergence of the solver: Numerical problems were found when the SNOWPACK time step is larger than 15 minutes. For example caused by the settling routine, // which is based on 15 minute time steps. So before digging further in the problem, make sure you run SNOWPACK with 15 minute time steps. -// - Evaporation from soil in dry limit. This cause numerical troubles, but it is also not realistic to force a certain amount of evaporation from a near-dry soil (the water +// +// A lot of problems arise from non convergence in the solver and very small time steps. A common reason for this is filling of the model domain. Clear examples: +// - Evaporation from soil in dry limit. This causes numerical troubles, but it is also not realistic to force a certain amount of evaporation from a near-dry soil (the water // is just not there!). Set LIMITEDFLUXEVAPORATION or LIMITEDFLUX as top boundary condition to be safe. // - Infiltration in soil in wet limit. This can cause numerical trouble, but it is also not realistic. You cannot put more water in the domain then there is room for. -// So for example: never use 10 cm of soil with DIRICHLET lower boundary condition and NEUMANN on top. Set LIMITEDFLUXINFILTRATION or LIMITEDFLUX as lower boundary condition to be safe. -// -// SOME DEEPER LYING PROBLEMS: -// - In case of floating point exceptions with DEBUG_ARITHM set to OFF: -// - either pressure head is blowing up to extremely high values, before rewinds can be done. This can happen if you change MAX_ALLOWED_DELTA_H, but also make sure that the time step is still larger than the minimum allowed time step. -// If no rewinds can be done anymore, because the time step is already too small, the solver is just trying until it is killed. 
-// - In case of other exceptions, see the error message on screen. Messages from the solver: if info-value is positive, the number indicates quite often the element that is having illegal value, like nan, or inf. This can be a result from very small time steps (check dt), or else it is a bug. -// - Problems can be caused when the state of the snowcover changes rapidly between calls to ReSolver1d. For example, it was found that when SNOWPACK was run with 60 minute -// time steps, the settling was so fast that elements collapsed, producing saturated snow layers. Then ReSolver1d was not able to solve the equation anymore with reasonable time steps. -// - A lot of problems arise from non convergence in the solver and very small time steps. A common reason for this is filling of the model domain. Clear examples: -// - 10cm soil with saturated lower boundary condition. The soil will saturate quickly. No melt water can infilitrate the soil anymore, but starts ponding. In reality, such cases would lead to a water layer, or overland flow. -// - Strong infiltration rates (like high precipitation rates) -// - High values in the source term -// In these cases the model just cannot do anything with all the water... -// - The model is still running and producing sensible results, but is having a very small time step. -// First of all, I found that only numerous subsequent timesteps of <1s. should not be accepted and there is something "wrong" with the code. Else, just leave the code running. Note: you cannot really say wrong, as it is still finding converging solutions. -// Try to understand why the time step rewinds are done, maybe by even enabling the Level3 output. And using less to search the output with "less" and search for "rewind". -// - Is there a massbalance problem? Then probably the fluxes at the top and/or bottom are not correctly calculated, or there is a real bug. -// Massbalance errors also arise when k_ip12(i-1) != k_im12(i)! 
The nodal values should always be the same for both the upper and lower node! -// - Is there a very strong gradient in pressure head, for example at the new snow layer? What is the value for h_d, is it very small? Then maybe limit the range over which the Van Genuchten parameters can vary (limiting grain size for example for snow). -// +// Typically this may occur with strong infiltration rates (high precipitation rates) or large values in the source/sink term (potentially from the canopy module). +// So for example: never use 10 cm of soil with DIRICHLET lower boundary condition and NEUMANN on top. The soil then may saturate very quickly. No melt water can infiltrate +// the soil anymore, but starts ponding. In reality, such cases would lead to a water layer, or overland flow, which is not considered in SNOWPACK (yet). +// Set LIMITEDFLUXINFILTRATION or LIMITEDFLUX as lower boundary condition to be safe. + // TODO IN FUTURE DEVELOPMENT -// - Implement a strategy what to do with the rejected infilitrating water in case of LIMITEDFLUX and LIMITEDFLUXINFILTRATION. Either built-up a water layer (theta[WATER]==1) on top (real ponding), +// - Implement a strategy what to do with the rejected infiltrating water in case of LIMITEDFLUX and LIMITEDFLUXINFILTRATION. Either built-up a water layer (theta[WATER]==1) on top (real ponding), // or write it out in a kind of overland flow variable. - //Initializations - enum RunCases{UNIFORMSOIL, IMISDEFAULT, WFJ, CDP, SNOFILE}; + // define if matrix or preferential flow + const int WATERINDEX = (matrix == true) ? (WATER) : (WATER_PREF); // // BEGIN OF SETTINGS // - const RunCases runcase = SNOFILE; //Defines what the soil looks like. Recommended: SNOFILE, soil type based on grain size in sno file. - const BoundaryConditions TopBC = LIMITEDFLUX; //Bottom boundary condition (recommended choice is LIMITEDFLUX, so too much evaporation from dry soil or snow or too much infilitration in wet soil is prohibited). 
+ const BoundaryConditions TopBC = LIMITEDFLUX; //Top boundary condition (DIRICHLET, NEUMANN or LIMITEDFLUX). Recommended: LIMITEDFLUX, i.e. too much evaporation from dry soil or snow or too much infiltration in wet soil or snow is prohibited. //In case you select one of the LIMITEDFLUX options, specify whether these are only for soil, for snow or for both: const bool LIMITEDFLUXEVAPORATION_soil=true; const bool LIMITEDFLUXEVAPORATION_snow=true; const bool LIMITEDFLUXINFILTRATION_soil=true; const bool LIMITEDFLUXINFILTRATION_snow=true; const bool LIMITEDFLUXINFILTRATION_snowsoil=true; //This switch allows to limit the infiltration flux from snow into soil, when the snowpack is solved with the Bucket or NIED water transport scheme. - const bool AllowSoilFreezing=true; //true: soil may freeze. false: all ice will be removed (if any ice present) and no ice will form. - const bool ApplyIceImpedance=false; //Apply impedance on hydraulic conductivity in case of soil freezing. See: Zhao et al. (1997) and Hansson et al. (2004) [Dall'Amicao, 2011]. - const VanGenuchten_ModelTypesSnow VGModelTypeSnow=YAMAGUCHI2012; //(Recommended: YAMAGUCHI2012) Set a VanGenuchten model for snow (relates pressure head to theta and vice versa) - const bool alpine3d=false; //Flag for alpine3d simulations. Note: this flag is not necessary to set, but it will enforce some precautions to provide extra numerical stability (at the cost of small mass balance violations). + const bool LIMITEDFLUXSOURCESINKTERM=true; //Check if the source/sink term can be absorbed by the matrix. //Setting some program flow variables const bool SafeMode=true; //Enable safemode only when necessary, for example in operational runs or Alpine3D simulations. It rescues simulations that do not converge, at the cost of violating mass balance. 
- const bool WriteOutNumerics_Level0=false; //true: after time step, some summarizing numerics information is printed - const bool WriteOutNumerics_Level1=false; //true: per iteration, some basic numerics information is printed (not so much output, but still is useful for debugging) - const bool WriteOutNumerics_Level2=false; //true: only initial conditions and subsequent convergence information is printed during each time step (it's quite some output, but helps debugging) - const bool WriteOutNumerics_Level3=false; //true: per iteration, highly detailed layer and solver is printed (it's really a lot of output, but helps debugging the most difficult problems) + const bool WriteDebugOutput=false; //true: debugging output is printed - const bool AllowDrySnowLayers=false; //true: snow layers can be dry (theta=0.) false: snow layers will always be at least theta=theta_r. The necessary water is created by melting ice. - const bool AllowDrySoilLayers=false; //true: soil layers can be dry (theta=0.) false: soil layers will always be at least theta=theta_r. The necessary water is created out of nothing! - // Will destroy mass balance! Setting this flag true for soil layers is not recommended as it will not be really physical plausible. //Set the defaults based on whether CLAPACK is available #ifdef CLAPACK @@ -706,10 +748,11 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) SOLVERS ActiveSolver=PreferredSolver; //Set the ActiveSolver to the PreferredSolver. This is because the code tries to prevent "difficult" matrices to be solved by the DGTSV or TDMA algorithm, so we should be able to switch temporarily to another solver. - //Set parameterization for hydraulic conductivity - const K_Parameterizations K_PARAM=CALONNE; // Implemented choices: SHIMIZU, CALONNE, based on Shimizu (1970) and Calonne (2012). 
- + //Set parameterization for hydraulic functions for snow + const vanGenuchten::VanGenuchten_ModelTypesSnow VGModelTypeSnow=vanGenuchten::YAMAGUCHI2012; //[Water retention curve] Recommended: YAMAGUCHI2012 Set a VanGenuchten model for snow (relates pressure head to theta and vice versa) + const vanGenuchten::K_Parameterizations K_PARAM=vanGenuchten::CALONNE; //[Hydraulic conductivity] Recommended: CALONNE Implemented choices: CALONNE, KOZENYCARMAN and SHIMIZU, based on Kozeny (1927), Calonne (2012) and Shimizu (1970), respectively. + const SalinityMixingModels SALINITY_MIXING = NONE; // @@ -717,26 +760,24 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) // WARNING: Below this line, changes to initializations are likely to break the code! // - //Setting convergence criteria and numerical limits - const double REQUIRED_ACCURACY_H=1E-3; //Required accuracy for the Richard solver: this is for the delta h convergence criterion - const double REQUIRED_ACCURACY_THETA=1E-5; //Required accuracy for the Richard solver: this is for the delta theta convergence criterion. It is recommended to adjust PhaseChange::RE_theta_r in PhaseChanges.cc in case this value is changed. - //Huang et al. (1996) proposes 0.0001 here (=1E-4). 1E-4 causes some mass balance problems. Therefore, it is set to 1E-5. - const double convergencecriterionthreshold=0.99;//Based on this value of theta_dim, either theta-based convergence is chosen, or h-based. Note we need to make this destinction, beacuse theta-based does not work close to saturation or with ponding. - const double MAX_ALLOWED_DELTA_H=1E32; //Set an upper threshold for the delta_h[i] that is allowed. The idea is that when delta_h for an iteration is too large, we have a too large time step and a rewind is necessary. - const int INCR_ITER=5; //Number of iterations for the Richard solver after which time step is increased. 
- const int DECR_ITER=10; //Number of iterations for the Richard solver after which time step is decreased. - const int MAX_ITER=15; //Maximum number of iterations for the Richard solver. - const double MIN_VAL_TIMESTEP=1E-12; //Minimum time step allowed in Richards solver. Don't set this too low (let's say 1E-40), becuase the calculations are then done at the limits of the floating point precision. - const double MAX_VAL_TIMESTEP=900.; //Maximum time step allowed in Richards solver. - const double MAX_VAL_TIMESTEP_FOR_SNOW=900.; //Maximum time step allowed in Richards solver when there are snow layers in the domain. - const size_t BS_MAX_ITER=5000; //Maximum allowed number of iterations in the soil-freezing algorithm. - const double SF_epsilon=1E-4; //Required accuracy for the root finding algorithm when solving soil freezing/thawing. + + //Check for water layer (presence of a pond) and add it to the surfacefluxrate: + double backupWATERLAYER_Te = Constants::undefined; + if(allow_surface_ponding == true && Xdata.getNumberOfElements() > Xdata.SoilNode) { + if(Xdata.Edata[Xdata.getNumberOfElements()-1].theta[ICE] == 0. && Xdata.Edata[Xdata.getNumberOfElements()-1].theta[SOIL] == 0.) { + surfacefluxrate += (Xdata.Edata[Xdata.getNumberOfElements()-1].theta[WATER] * Xdata.Edata[Xdata.getNumberOfElements()-1].L) / sn_dt; + backupWATERLAYER_Te = Xdata.Edata[Xdata.getNumberOfElements()-1].Te; + Xdata.reduceNumberOfElements(Xdata.getNumberOfElements()-1); + } else { + backupWATERLAYER_Te = Constants::undefined; + } + } + //Initializing and defining Richards solver time domain - const double snowpack_dt = sn_dt; //Time step of SNOWPACK (in seconds) double dt=10.; //Set the initial time step for the Richard solver (in seconds). This time step should be smaller or equal to the SNOWPACK time step. bool boolFirstFunctionCall; //true: first execution of this function, false: not the first execution of this function - if (Xdata.ReSolver_dt>0.) 
{ //Retrieve last dt used in last performed time step. Note Xdata.SoliNumerics_dt<0 when initialized + if (Xdata.ReSolver_dt>0.) { //Retrieve last dt used in last performed time step. Note Xdata.ReSolver_dt<0 when initialized boolFirstFunctionCall=false; //Subsequent call to ReSolver1d dt=Xdata.ReSolver_dt; //Set time step to last used time step } else { //else it is the first time this function is called. @@ -748,55 +789,30 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) //Initializing and defining Richards solver space domain const size_t nN=Xdata.getNumberOfNodes(); //Number of nodes const size_t nE=nN-1; //Number of layers - const double cos_sl = Xdata.cos_sl; //Slope cosinus, giving cos_sl=1 for flat field. vector& EMS = Xdata.Edata; //Create reference to SNOWPACK elements. vector& NDS = Xdata.Ndata; //Create reference to SNOWPACK nodes. - std::vector dz(nE, 0.); //Layer height (in meters) - std::vector z(nE, 0.); //Height above the surface (so -1 is 1m below surface) - std::vector dz_up(nE, 0.); //Distance to upper node (in meters) - std::vector dz_down(nE, 0.); //Distance to lower node (in meters) - std::vector dz_(nE, 0.); //Layer distance for the finite differences, see Rathfelder (2004). - int uppernode=-1; //Upper node of Richards solver domain - int lowernode=-1; //Lower node of Richards solver domain - std::vectorSnowpackElement(nE,0); //Dictionary between snowpack domain and Richards solver domain. SnowpackElement[j]=i means layer j in Richards solver is layer i in snowpack domain. - //Then, using EMS[SnowpackElement[j]], we can refer to the SNOWPACK domain from the Richards solver domain. - int toplayer; //highest layer (top of snowpack, or top of soil in case of no soil) - const int nsoillayers_snowpack=int(Xdata.SoilNode); //where does the soil start? Note, when toplayer is set to nsoillayers_snowpack, only soil is treated with Richards equation. 
HACK, TODO: remove type inconstency in comparison - //Note here that Xdata.SoilNode denotes first element as 0, so Xdata.SoilNode=4 denotes 4 soil layers. - int nsoillayers_richardssolver=0; - if(iwatertransportmodel_snow != RICHARDSEQUATION) { //RE only for soil - toplayer=nsoillayers_snowpack; //toplayer=nE: all layers are treated by richards equation, toplayer=nsoillayers_snowpack: only soil. - } else { //RE for both snow and soil - toplayer=int(nE); //toplayer=nE: all layers are treated by richards equation, toplayer=nsoillayers_snowpack: only soil. HACK, TODO: remove type inconstency in comparison - } - if(toplayer==0) return; //Nothing to do here! - if(nsoillayers_snowpack==1) { - // Running Richards equation solver with only 1 layer results in a segmentation fault, because out of bound indices with heat advection. - // Note: resolving this issue is not planned for, but may be addressed in the future. Anyway, it shouldn't be a big issue to have two soil layers or more instead of one. - prn_msg( __FILE__, __LINE__, "err", Date(), "The implementation of RICHARDSEQUATION requires two or more soil layers, whereas now only one soil layer is present. Please change your initialization."); - throw; - } + + if ((nE == 0) || (iwatertransportmodel_snow != RICHARDSEQUATION && Xdata.SoilNode == 0)) return; //Nothing to do here! + const size_t uppernode = (iwatertransportmodel_snow != RICHARDSEQUATION) ? (Xdata.SoilNode - 1) : (nE - 1); //highest layer (top of snowpack, or top of soil in case of no soil) + size_t lowernode=0; //Lower node of Richards solver domain //Initializations of the convergence criteria - int trigger_layer_accuracy=-1; //At which layer the accuracy was not reached. double track_accuracy_h=0.; //This variable tracks the accuracy of convergence for all h-convergence based layers. double track_accuracy_theta=0.; //This variable tracks the accuracy of convergence for all theta-convergence based layers. 
double max_delta_h=0.; //Tracks max_delta_h, to determine if our time step is too large. Note: this is different from checking the convergence criterion. This is just to check the time step. If a too large time step is used, big values of delta_h may arise, which cause pressure head to hit the singularities for dry and wet soil, and causes problems with the power functions in the Von Genuchten-Mualem model. - int track_trigger_layer_accuracy=-1; //This variable tracks the layer were the accuracy is smallest (this means: most difficulty in converging). bool boolConvergence=false; //true: convergence is reached, false: convergence not reached double mass1=0, mass2=0, massbalanceerror=0.; //Mass balance check variables. - const double maxallowedmassbalanceerror=1E-10; //This value is carefully chosen. It should be considered together with REQUIRED_ACCURACY_THETA and REQUIRED_ACCURACY_H + const double maxallowedmassbalanceerror=1E-6; //This value is carefully chosen. It should be considered together with REQUIRED_ACCURACY_THETA and REQUIRED_ACCURACY_H double massbalanceerror_sum=0.; //Sum of mass balance error over snowpack time step. //Initializations for summarizing statistics and some supporting variables, like indices, counters, etc. double accuracy=0.; //Keeps track of reached accuracy. - int niter=0; //Counts iterations within one time step of the Richards solver - // Commented to remove set but not used compiler warning - // int niter_snowpack_dt=0; //Counts iterations within one time step of the SNOWPACK time domain - int niter_nrewinds=0; //Counts number of rewinds (i.e. a solution was not found and it is tried again with a smaller time step) - int niter_seqrewinds=0; //Counts number of sequential rewinds. We then decrease the time step more, when we encounter sequential rewinds. 
- int seq_safemode=0; //Counts the number of sequential SafeMode actions + unsigned int niter=0; //Counts iterations within one time step of the Richards solver + unsigned int niter_snowpack_dt=0; //Counts iterations within one time step of the SNOWPACK time domain + unsigned int niter_nrewinds=0; //Counts number of rewinds (i.e. a solution was not found and it is tried again with a smaller time step) + unsigned int niter_seqrewinds=0; //Counts number of sequential rewinds. We then decrease the time step more, when we encounter sequential rewinds. + unsigned int seq_safemode=0; //Counts the number of sequential SafeMode actions //Numerical performance statistics double stats_min_dt=MAX_VAL_TIMESTEP; //Minimum RE time step in SNOWPACK time step, initialized in a way that the comparison will always work. double stats_max_dt=0.; //Maximum RE time step in SNOWPACK time step, initialized in a way that the comparison will always work. @@ -806,26 +822,23 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) size_t bs_stats_totiter=0; //Soil freezing/thawing solver: total number of iterations over all layers over the SNOWPACK time step, size_t bs_stats_maxiter=0; //Soil freezing/thawing solver: maximum number of iterations in a certain layers over the SNOWPACK time step. //Counters, etc. - int i, j, k; //Counters for layers + size_t i, j, k; //Counters for layers const size_t nmemstates=1; //Number of memory states, used to store changes of delta_h between iterations. Currently not used, but possible use is to check if delta_h is blowing up. int memstate=0; //Keeping track of the current memory index - double h_d=0.; //Lower limit for pressure head: definition of "completely dry". This value will determined later on. + double h_d=0.; //Lower limit for pressure head: definition of "completely dry". This value will be determined later. 
+ double min_theta=Constants::eps; //Initializing and declaring boundary conditions and flux variables BoundaryConditions aTopBC; //Actual applied top boundary condition (can only be either Dirichlet or Neumann, as all the others can be translated in an application of either one of those two.) BoundaryConditions aBottomBC; //Actual applied bottom boundary condition (can only be either Dirichlet or Neumann, as all the others can be translated in an application of either one of those two.) double htop=0., TopFluxRate=0.; //Dirichlet (constant head) and Neumann (constant flux) upper boundary values respectively. - double h_d_uppernode=0.; //Used for LIMITEDFLUXEVAPORATION boundary condition. + double h_d_uppernode=h_d; //Used for LIMITEDFLUXEVAPORATION boundary condition. double hbottom=0., BottomFluxRate=0.; //Dirichlet (constant head) and Neumann (constant flux) lower boundary values respectively. double actualtopflux=0; //Stores the actual applied flux through top (positive is inflow). - double actualtopfluxcheck=0.; //Stores the water change in the top element + outflow to lower layers to derive the input flux at the surface. double refusedtopflux=0; //Stores the difference in flux that was requested, but could not be applied double actualbottomflux=0; //Stores the actual flux through the bottom (positive is outflow). - double snowsoilinterfaceflux1=0.; //Stores the actual flux through the soil-snow interface (positive is flow into soil). - double snowsoilinterfaceflux2=0.; //Stores the actual flux through the soil-snow interface (positive is flow into soil). - double snowsoilinterfaceflux_before=0.; //Stores the actual flux through the soil-snow interface at the beginning of a time step (positive is flow into soil). - double snowsoilinterfaceflux_after=0.; //Stores the actual flux through the soil-snow interface at the end of a time step (positive is flow into soil). 
+ double snowsoilinterfaceflux=0.; //Stores the actual flux through the soil-snow interface (positive is flow into soil). double totalsourcetermflux=0.; //Stores the total applied source term flux (it's a kind of boundary flux, but then in the middle of the domain). //Declare all numerical arrays and matrices: @@ -839,9 +852,10 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) std::vector delta_Te_i(nE, 0.); //Change in element temperature per iteration time step due to soil freezing/thawing. std::vector delta_Te_adv(nE, 0.); //Change in element temperature per time step due to heat advection by the water flow. std::vector delta_Te_adv_i(nE, 0.); //Change in element temperature per iteration time step due to heat advection by the water flow. + std::vector rho(nE, 0.); //Liquid density //std::vector > a(nE, std::vector (nE, 0)); //Left hand side matrix. Note, we write immediately to ainv! But this is kept in to understand the original code. - std::vector ainv(nE*nE, 0.); //Inverse of A, written down as a 1D array instead of a 2D array, with the translation: a[i][j]=ainv[i*nsoillayers_snowpack+j] + std::vector ainv(nE*nE, 0.); //Inverse of A, written down as a 1D array instead of a 2D array, with the translation: a[i][j]=ainv[i*nlayers+j] std::vector ad(nE, 0.); //The diagonal of matrix A, used for DGTSV std::vector adu(nE, 0.); //The upper second diagonal of matrix A, used for DGTSV std::vector adl(nE, 0.); //The lower second diagonal of matrix A, used for DGTSV @@ -852,49 +866,26 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) std::vector h_n(nE, 0.); //Pressure head at beginning of time step dt. Used to determine delta_h_dt, to better forecast value for next time step. std::vector s(nE, 0.); //Source/sink in terms of theta [m^3/m^3/s]. std::vector C(nE, 0.); //Water capacity function. Specific moisture capacity (dtheta/dh), see Celia et al., (1990). - std::vector ksat(nE, 0.); //Soil property. 
Saturation hydraulic conductivity. std::vector K(nE, 0.); //Hydraulic conductivity function std::vector impedance(nE, 0.); //Impedance factor due to ice formation in matrix (see Dall'Amico, 2011); std::vector Se(nE, 0.); //Effective saturation, sometimes called dimensionless volumetric water content. - std::vector term_up(nE, 0.); //Variable to support construction of the R.H.S. (R_mpfd in Celia et al., 1990). - std::vector term_down(nE, 0.); //Variable to support construction of the R.H.S. (R_mpfd in Celia et al., 1990). std::vector r_mpfd(nE, 0.); //R_mpfd (see Celia et al, 1990). std::vector r_mpfd2(nE, 0.); //Copy of R_mpfd, used for DGTSV. Note: R_mpfd2 is overwritten by DGTSV, so we need a copy. - std::vector deficit_vector(nE, 0.); //Deficit vector of solution - double deficit_vector_norm=0.; //2nd norm of deficit vector std::vector h_np1_mp1(nE, 0.); //Pressure head for the solution time step in the next iteration std::vector theta_np1_m(nE, 0.); //Theta for the solution time step in the current iteration. std::vector theta_np1_mp1(nE, 0.); //Theta for the solution time step in the next iteration. std::vector theta_n(nE, 0.); //Theta at the current time step. std::vector theta_d(nE, 0.); //There is a singularity for dry soils, at theta=theta_r. There h -> OO. So we limit this. We define a pressure head that we consider "dry soil" (h_d) and then we calculate what theta belongs to this h_d. - std::vector alpha(nE, 0.); //Soil property in Van Genuchten model. [m^-1] - std::vector n(nE, 0.); //Soil property in Van Genuchten model. - std::vector m(nE, 0.); //Soil property in Van Genuchten model. - std::vector h_e(nE, 0.); //Soil property, air entry pressure, see Ippisch (2006) for details. - std::vector theta_r(nE, 0.); //Soil property, residual water content. - std::vector theta_s(nE, 0.); //Soil property, saturation water content. - std::vector Sc(nE, 0.); //Saturation at cut-off point h_e (see Ippisch et al (2006)). 
+ std::vector theta_i_n(nE, 0.); //Soil state, ice content at the beginning of the time step. Volumetric water content and NOT liquid water equivalent! std::vector theta_i_np1_m(nE, 0.); //Soil state, ice content at the beginning of the current iteration. Volumetric water content and NOT liquid water equivalent! std::vector theta_i_np1_mp1(nE, 0.); //Soil state, ice content at the next iteration. Volumetric water content and NOT liquid water equivalent! - std::vector activelayer(nE, true); //true: layer is active participating in matrix flow (Richards equation). false: layer is inactive (too dry, or ice layer) std::vector dT(nE, 0.); //Stores the energy needed to create theta_r from the ice matrix. std::vector snowpackBACKUPTHETAICE(nE, 0.); //Backup array for the initial SNOWPACK theta ice - std::vector snowpackBACKUPTHETAWATER(nE, 0.); //Backup array for the initial SNOWPACK theta water - std::vector wateroverflow(nE, 0.); //Array for all the water that is >theta_s (m^3/m^3)]. This water is just thrown away in the model and is a leak in the mass balance. - - //For soil freezing/thawing - std::vector T_melt(nE, 0.); //Contains the freezing point depression due to unsaturated conditions (K) - const double T_0=Constants::freezing_tk; //Freezing temperature of water at atmospheric pressure (K) - const double delF=Constants::lh_fusion; //Heat associated with freezing - //Prevent buffering on the stdout when we write debugging output. In case of exceptions (program crashes), we don't loose any output which is still in the buffer and we can better track what went wrong. 
- if(WriteOutNumerics_Level1==true || WriteOutNumerics_Level2==true || WriteOutNumerics_Level3==true) { - setvbuf(stdout, NULL, _IONBF, 0); - } - + if(WriteDebugOutput) setvbuf(stdout, NULL, _IONBF, 0); #ifdef DEBUG_ARITHM if(boolFirstFunctionCall==true) { @@ -903,491 +894,212 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) } #endif - //Backup SNOWPACK state - for (i = 0; i1.) { - std::cout << "WARNING: very strange, theta[ICE]>1 at layer " << i << "/" << nE << " (" << EMS[i].theta[ICE] << ")\n"; - EMS[i].theta[ICE]=1.; - } - //Do the basic check of the sum of element contents. - const double sum=EMS[i].theta[AIR] + EMS[i].theta[WATER] + EMS[i].theta[ICE] + EMS[i].theta[SOIL]; - if((sum>1.+Constants::eps || sum<1.-Constants::eps) && (boolFirstFunctionCall!=true)) { - std::cout << "WARNING: very strange, sum of element contents != 1. (but " << sum << ") at layer " << i << "/" << nE << ". Values scaled.\n"; - //Note: we do not scale theta[SOIL]. - const double correction_factor=(EMS[i].theta[AIR] + EMS[i].theta[WATER] + EMS[i].theta[ICE])/(1.-EMS[i].theta[SOIL]); - EMS[i].theta[AIR]/=correction_factor; - wateroverflow[i]+=(EMS[i].theta[ICE]-(EMS[i].theta[ICE]/correction_factor))*(Constants::density_ice/Constants::density_water); //We just throw away the ice, without considering melting it. 
- EMS[i].theta[ICE]/=correction_factor; - wateroverflow[i]+=EMS[i].theta[WATER]-(EMS[i].theta[WATER]/correction_factor); - EMS[i].theta[WATER]/=correction_factor; - } else { - if(boolFirstFunctionCall==true) { - EMS[i].theta[AIR]=1.-EMS[i].theta[SOIL]-EMS[i].theta[ICE]-EMS[i].theta[WATER]; - } - } - - if (WriteOutNumerics_Level2==true) - std::cout << "RECEIVING at layer " << i << ": sum=" << sum << std::fixed << std::setprecision(15) << " air=" << EMS[i].theta[AIR] << " ice=" << EMS[i].theta[ICE] << " soil=" << EMS[i].theta[SOIL] << " water=" << EMS[i].theta[WATER] << " Te=" << EMS[i].Te << "\n"<< std::setprecision(6) ; - - //In case we don't want to allow soil to freeze, melt all ice that is there: - if(AllowSoilFreezing==false && i0.) { //If we are in soil, and have ice, but don't allow soil freezing, melt all the ice. - if(i>0) { //Because we now work with Dirichlet BC at the lower boundary, we cannot increase water content. So then, increase air content. - EMS[i].theta[WATER]+=EMS[i].theta[ICE]*(Constants::density_ice/Constants::density_water); - } else { - EMS[i].theta[AIR]+=EMS[i].theta[ICE]; - } - const double deltaT=(-1.*EMS[i].theta[ICE]) / ((EMS[i].c[TEMPERATURE] * EMS[i].Rho) / ( Constants::density_ice * Constants::lh_fusion )); - EMS[i].Te+=deltaT; - - if(i==int(nE)-1 && i>=0) { //HACK, TODO: remove type inconstency in comparison - NDS[i+1].T+=deltaT; - NDS[i].T+=deltaT; - } - - EMS[i].Qmf += (-1.*EMS[i].theta[ICE] * Constants::density_ice * Constants::lh_fusion) / snowpack_dt; // Units: [W m-3] - EMS[i].theta[ICE]=0.; - //And now update state properties. - EMS[i].Rho = (EMS[i].theta[ICE] * Constants::density_ice) + (EMS[i].theta[WATER] * Constants::density_water) + (EMS[i].theta[SOIL] * EMS[i].soil[SOIL_RHO]); - EMS[i].M=EMS[i].L*EMS[i].Rho; - } - - //Make backup of incoming values for theta[ICE] and theta[WATER]. 
This is used in case we allow dry snow layers *AND* MIN_VAL_THETA_SNOWPACK > 0., to determine original water content and how much water was added to the domain. - snowpackBACKUPTHETAICE[i]=EMS[i].theta[ICE]; - snowpackBACKUPTHETAWATER[i]=EMS[i].theta[WATER]; + if(boolFirstFunctionCall==true && matrix == true && enable_pref_flow == true && K_AverageType != GEOMETRICMEAN) { + // Warn if enable_pref_flow == true and the K_AverageType != GEOMETRICMEAN. + // Dual domain approach was designed using GEOMETRICMEAN and other combinations are unlikely to be made on purpose. + prn_msg(__FILE__, __LINE__, "wrn", Date(), "PREF_FLOW = TRUE is expecting (for the matrix part) AVG_METHOD_HYDRAULIC_CONDUCTIVITY = GEOMETRICMEAN!"); } - - //Domain initialization (this needs to be done every time step, as snowpack layers will settle and thereby change height. - //i=0 is bottom layer, and toplayer-1 is top layer. - double heightshift=0.; //heightshift can be used to shift the vertical domain up and down. - double totalheight=0.; //tracking the total height of the column - j=0; //Reset Richards-solver domain layer count - for (i = 0; insoillayers_snowpack-1) { //We are in snow, and create n sublayers per snow layer - const int nsublayers=1; //Specify number of sublayers - for(k=0; k 0. && EMS[i].theta[WATERINDEX] < min_theta) min_theta = EMS[i].theta[WATERINDEX]; } - dz_down[lowernode]=totalheight-tmpheight1; - dz_up[uppernode]=totalheight-tmpheight2; - if(WriteOutNumerics_Level2==true) std::cout << "SLOPE: " << std::fixed << std::setprecision(15) << cos_sl << "\n" << std::setprecision(6); + // Grid initialization (this needs to be done every time step, as snowpack layers will settle and thereby change height) + InitializeGrid(EMS, lowernode, uppernode); - //Now set van Genuchten parameter for each layer - h_d=0.; //Set definition of pressure head of completely dry to zero, we will determine it in the next loop. 
- double tmpheight=0.; - for (i=uppernode; i >= 0; i--) { //Go from top to bottom in Richard solver domain - if ( SnowpackElement[i] >= nsoillayers_snowpack) { //Snow, assuming that the use of sublayers (higher resolution) is only used in snow. TODO: this has to be rewritten more nicely!! - const double max_allowed_ice=0.95; //An ice pore space of 5% is a reasonable value: K. M. Golden et al. The Percolation Phase Transition in Sea Ice, Science 282, 2238 (1998), doi: 10.1126/science.282.5397.2238 - if(EMS[SnowpackElement[i]].theta[ICE]>max_allowed_ice) { + //Now set hydraulic properties for each layer + h_d=0.; //Set definition of pressure head of completely dry to zero, we will determine it in the next loop. + for (i = lowernode; i <= uppernode; i++) { + if ( i >= Xdata.SoilNode ) { //Snow + if(EMS[i].theta[ICE]>max_theta_ice) { //Pure ice layers are a problem for Richards equation (of course...), so we limit the volumetric ice content to 99 %. - const double tmp_missing_theta=(EMS[SnowpackElement[i]].theta[ICE]-max_allowed_ice)*(Constants::density_ice/Constants::density_water); //Not too dry (original) - dT[SnowpackElement[i]]+=tmp_missing_theta*(Constants::density_water/Constants::density_ice) / ((EMS[SnowpackElement[i]].c[TEMPERATURE] * EMS[SnowpackElement[i]].Rho) / ( Constants::density_ice * Constants::lh_fusion )); - std::cout << "[W] ReSolver1d.cc: ICE LAYER --> WATER CREATED (" << tmp_missing_theta << "): i=" << i << " --- dT=" << dT[SnowpackElement[i]] << " T=" << EMS[SnowpackElement[i]].Te << " theta[WATER]=" << EMS[SnowpackElement[i]].theta[WATER] << " theta[ICE]=" << EMS[SnowpackElement[i]].theta[ICE] << "\n"; - EMS[SnowpackElement[i]].theta[WATER]+=0.99*tmp_missing_theta; //Here, we make a small mass balance error, but it should prevent fully saturated layers - EMS[SnowpackElement[i]].theta[ICE]-=tmp_missing_theta*(Constants::density_water/Constants::density_ice); - 
EMS[SnowpackElement[i]].theta[AIR]=1.-EMS[SnowpackElement[i]].theta[ICE]-EMS[SnowpackElement[i]].theta[WATER]; + const double tmp_excess_theta=(EMS[i].theta[ICE]-max_theta_ice)*(Constants::density_ice/Constants::density_water); + dT[i]+=tmp_excess_theta*(Constants::density_water/Constants::density_ice) / ((EMS[i].c[TEMPERATURE] * EMS[i].Rho) / ( Constants::density_ice * Constants::lh_fusion )); + EMS[i].theta[WATERINDEX]+=tmp_excess_theta; + EMS[i].theta[ICE]-=tmp_excess_theta*(Constants::density_water/Constants::density_ice); + EMS[i].theta[AIR]=1.-EMS[i].theta[ICE]-EMS[i].theta[WATER]-EMS[i].theta[WATER_PREF]; } - //Scaling theta_r between 0 and 0.02: - const double TuningFactor=0.75; //Tuning factor for scaling - //Increase theta_r in case of wetting: - theta_r[i]=std::max(0., std::min(0.02, std::max(EMS[SnowpackElement[i]].theta_r, TuningFactor*EMS[SnowpackElement[i]].theta[WATER]))); - //Decrease theta_r in case of refreezing: - theta_r[i]=std::max(0., std::min(theta_r[i], EMS[SnowpackElement[i]].theta[WATER]-(REQUIRED_ACCURACY_THETA/10.))); - - theta_s[i]=(1. - EMS[SnowpackElement[i]].theta[ICE])*(Constants::density_ice/Constants::density_water); - - //Make ice layers inactive: - if(theta_s[i]1, which is required. - //Now limit grain sizes - if(EMS[SnowpackElement[i]].rg>GRAINRADIUSUPPERTHRESHOLD) EMS[SnowpackElement[i]].rg=GRAINRADIUSUPPERTHRESHOLD; - if(EMS[SnowpackElement[i]].rgGRAINRADIUSUPPERTHRESHOLD) EMS[SnowpackElement[i]].rg=GRAINRADIUSUPPERTHRESHOLD; - if(EMS[SnowpackElement[i]].rgGRAINRADIUSUPPERTHRESHOLD) EMS[SnowpackElement[i]].rg=GRAINRADIUSUPPERTHRESHOLD; - if(EMS[SnowpackElement[i]].rg450.) { - ksat[i]=0.077 * (2.*EMS[SnowpackElement[i]].rg / 1000.)*(2.*EMS[SnowpackElement[i]].rg / 1000.) * exp(-0.0078 * 450.) * (Constants::g * Constants::density_water) / tmp_dynamic_viscosity_water; - } else { - ksat[i]=0.077 * (2.*EMS[SnowpackElement[i]].rg / 1000.)*(2.*EMS[SnowpackElement[i]].rg / 1000.) 
* exp(-0.0078 * EMS[SnowpackElement[i]].theta[ICE] * Constants::density_ice) * (Constants::g * Constants::density_water) / tmp_dynamic_viscosity_water; - } - break; - - case CALONNE: - //See: Calonne et al., 3-D image-based numerical computations of snow permeability: links to specific surface area, density, and microstructural anisotropy, TC, 2012. - ksat[i]=0.75 * (EMS[SnowpackElement[i]].ogs / 1000.)*(EMS[SnowpackElement[i]].ogs / 1000.) * exp(-0.013 * EMS[SnowpackElement[i]].theta[ICE] * Constants::density_ice) * (Constants::g * Constants::density_water) / tmp_dynamic_viscosity_water; - break; - } - - //Restore original grain size value from backup - EMS[SnowpackElement[i]].rg=tmprg; - } else { //Soil - tmpheight+=dz[i]; //This is only done in soil, so we have a relative reference only for a soil, not for snow. - - switch ( runcase ) { - case UNIFORMSOIL: - //Uniform soil - SetSoil(WFJGRAVELSAND, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - //SetSoil(SAND, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - //SetSoil(SANDYLOAM, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - break; - case IMISDEFAULT: - //Default case (IMIS): - if(tmpheight<=0.25001) { - //Silt loam - //SetSoil(ORGANIC, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - //SetSoil(SILTLOAM, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - SetSoil(SANDYLOAM, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else { //Gravel/sand - if(tmpheight<1.001) { - SetSoil(SAND, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else { - SetSoil(WFJGRAVELSAND, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } - } - break; - case WFJ: - //Case WFJ: - SetSoil(WFJGRAVELSAND, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - break; - case CDP: - //Case Col de Porte - SetSoil(SANDYLOAM, &theta_r[i], 
&theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - break; - case SNOFILE: - if(EMS[SnowpackElement[i]].rg < 0.5) { - SetSoil(ORGANIC, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 1.) { - SetSoil(CLAY, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 2.) { - SetSoil(CLAYLOAM, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 3.) { - SetSoil(LOAM, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 4.) { - SetSoil(LOAMYSAND, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 5.) { - SetSoil(SAND, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 6.) { - SetSoil(SANDYCLAY, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 7.) { - SetSoil(SANDYCLAYLOAM, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 8.) { - SetSoil(SANDYLOAM, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 9.) { - SetSoil(SILT, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 10.) { - SetSoil(SILTYCLAY, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 11.) { - SetSoil(SILTYCLAYLOAM, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else if (EMS[SnowpackElement[i]].rg < 12.) 
{ - SetSoil(SILTLOAM, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); - } else { - SetSoil(WFJGRAVELSAND, &theta_r[i], &theta_s[i], &alpha[i], &m[i], &n[i], &ksat[i], &h_e[i]); + // Scale theta_s + if(WATERINDEX==WATER_PREF) { + EMS[i].VG.theta_s*=EMS[i].PrefFlowArea; + } else { + if(enable_pref_flow) { + EMS[i].VG.theta_s*=(1.-EMS[i].PrefFlowArea); } - break; } - //I encountered the following problem: fully saturated soil and freezing water: there is not enough place to store the ice!!! - //In the old snowpack code, this problem was solved by keeping the increase in volume when all the water in the element would freeze, free as theta[AIR]. - //However, this will not work in the Richards, as theta[WATER] is varying per time step. So we keep free a volume as if the soil is saturated AND will freeze: - EMS[SnowpackElement[i]].theta[SOIL]=1.-((Constants::density_water/Constants::density_ice)*theta_s[i]); //Determine the soil content based on the pore space - } - - //Calculate m: - m[i]=(n[i]-1.)/n[i]; - - //Calculate saturation at cut-off point h_e (see Ippisch et al (2006)). - Sc[i]=pow((1.+pow(alpha[i]*fabs(h_e[i]), n[i])), -1.*m[i]); - //Get theta_i_n - if(EMS[i].theta[SOIL]>Constants::eps2) { //Only for soil - theta_i_n[i]=EMS[SnowpackElement[i]].theta[ICE]; - - //Get T_melt that suffices partitioning pressure head into part for ice and part for water - if(theta_i_n[i]>0.) { //If there is ice in soil, calculate freezing point depression. - const double hw0=fromTHETAtoH(EMS[SnowpackElement[i]].theta[WATER]+(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water)), theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], h_d); - T_melt[i]=T_0+((Constants::g*T_0)/delF)*hw0; - } else { - T_melt[i]=T_0; //Melting point is just the standard melting point. 
- } - } else { //For snow - theta_i_n[i]=0.; //This sounds strange for snow, but the idea is that ice in snow functions as soil in soil (being the matrix) - T_melt[i]=T_0; //For snow, we currently don't have anything with freezing point depression, as we have in soil. + theta_i_n[i]=0.; //This sounds strange for snow, but the idea is that ice in snow functions as soil in soil (being the matrix) + EMS[i].meltfreeze_tk=Constants::meltfreeze_tk; //For snow, we currently don't have anything with freezing point depression, as we have in soil. + } else { //Soil + EMS[i].VG.SetVGParamsSoil(); + theta_i_n[i]=EMS[i].theta[ICE]; + //Get melting point that suffices partitioning pressure head into part for ice and part for water + const double hw0=std::min(EMS[i].VG.h_e, EMS[i].VG.fromTHETAtoH(EMS[i].theta[WATER]+(EMS[i].theta[ICE]*(Constants::density_ice/Constants::density_water)), h_d)); + EMS[i].meltfreeze_tk=Constants::meltfreeze_tk+((Constants::g*Constants::meltfreeze_tk)/Constants::lh_fusion)*hw0; } //Determine what pressure head should be considered "dry". //Explanation: cold dry new snow layers are initialized with this value. We need to make sure that ALL the other layers have at least a higher pressure head when they contain at least a little bit of water. Else, various numerical troubles arise. //In case the value is too high, we get fluxes out of the completely dry snow layer, and a too low value causes many numerical difficulties as in that case, it represents a much stronger gradient in pressure head than necessary (many iterations and small time steps). //So we check for each particular layer what pressure head is associated with a theta[WATER] that is a smaller deviation from theta_r then the solver will resolve. 
- const double tmp_head=fromTHETAtoHforICE(theta_r[i]+(REQUIRED_ACCURACY_THETA/10.), theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], h_d, theta_i_n[i]); + const double tmp_head=EMS[i].VG.fromTHETAtoHforICE(EMS[i].VG.theta_r+(REQUIRED_ACCURACY_THETA/10.), h_d, theta_i_n[i]); if(h_d>tmp_head) h_d=tmp_head; if(i==uppernode) h_d_uppernode=tmp_head; //We store this value in order to use it for the LIMITEDFLUXEVAPORATION - if (WriteOutNumerics_Level3==true) - std::cout << "H_D at " << i << ": " << std::scientific << tmp_head << std::fixed << " [alpha: " << alpha[i] << "; m: " << m[i] << "; n: " << n[i] << "; Sc: " << Sc[i] << "; h_e: " << h_e[i] << "\n"; + if (WriteDebugOutput) + std::cout << "H_D at " << i << ": " << std::scientific << tmp_head << std::fixed << " [alpha: " << EMS[i].VG.alpha << "; m: " << EMS[i].VG.m << "; n: " << EMS[i].VG.n << "; Sc: " << EMS[i].VG.Sc << "]; h_e: " << EMS[i].VG.h_e << " min_theta: " << min_theta << "\n"; } - if (WriteOutNumerics_Level2==true) std::cout << "MIN_HEAD: " << std::scientific << h_d << std::fixed << "\n"; //Coupling of SNOWPACK domain to RE-solver domain. Makes sure the EMS.theta[XXX] are within the limits specified by the Van Genuchten parameterizations. - for (i = uppernode; i >= lowernode; i--) { //Cycle over all Richards solver domain layers - //Now calculate the theta that should be considered "dry soil". 
- theta_d[i]=fromHtoTHETAforICE(h_d, theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], 0.); - - //Now check if this case is not too extreme - if(theta_d[i]Constants::eps2) { //For soil - if(EMS[SnowpackElement[i]].theta[WATER]+(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water)) > theta_s[i]) { - wateroverflow[i]+=(EMS[SnowpackElement[i]].theta[WATER]+(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water))-theta_s[i]); - EMS[SnowpackElement[i]].theta[WATER]=theta_s[i]-(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water)); + if(EMS[i].theta[SOIL]>Constants::eps2) { //For soil + if(WATERINDEX==WATER){ //As for soil, we only use the matrix flow part, and we inhibit water flow in preferential flow, we should check this only for the matrix flow + const double corr_factor = (boolFirstFunctionCall) ? (0.95) : (1.); + if(EMS[i].theta[WATERINDEX]+(EMS[i].theta[ICE]*(Constants::density_ice/Constants::density_water)) > corr_factor*EMS[i].VG.theta_s) { + EMS[i].theta[WATERINDEX]=corr_factor*EMS[i].VG.theta_s-(EMS[i].theta[ICE]*(Constants::density_ice/Constants::density_water)); + } } - } else { //For snow - if(EMS[SnowpackElement[i]].theta[WATER] > theta_s[i]) { - wateroverflow[i]+=EMS[SnowpackElement[i]].theta[WATER]-theta_s[i]; - EMS[SnowpackElement[i]].theta[WATER]=theta_s[i]; + } else { //For snow + if(EMS[i].theta[WATERINDEX] > EMS[i].VG.theta_s) { + EMS[i].theta[WATERINDEX]=EMS[i].VG.theta_s; } } // 2) Not too dry - if(EMS[SnowpackElement[i]].theta[SOIL]>Constants::eps2) { //For soil - if(AllowDrySoilLayers==true) { - if(not( (EMS[SnowpackElement[i]].theta[WATER]+(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water))) < theta_r[i]) && (EMS[SnowpackElement[i]].theta[WATER]+(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water))) < theta_d[i]) { - wateroverflow[i]+=( 
(EMS[SnowpackElement[i]].theta[WATER]+(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water))) - theta_d[i]); - EMS[SnowpackElement[i]].theta[WATER]=theta_d[i]-(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water)); - activelayer[i]=true; - } else { - if ((EMS[SnowpackElement[i]].theta[WATER]+(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water))) < theta_r[i]) { - activelayer[i]=false; - } else { - activelayer[i]=true; - } + if(EMS[i].theta[SOIL]>Constants::eps2) { //For soil + if(matrix==true) { + if ((EMS[i].theta[WATERINDEX]+(EMS[i].theta[ICE]*(Constants::density_ice/Constants::density_water))) < theta_d[i]) { + EMS[i].theta[WATERINDEX]=theta_d[i]-(EMS[i].theta[ICE]*(Constants::density_ice/Constants::density_water)); } - } else { - if ((EMS[SnowpackElement[i]].theta[WATER]+(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water))) < theta_d[i]) { - wateroverflow[i]+=( (EMS[SnowpackElement[i]].theta[WATER]+(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water))) - theta_d[i]); - EMS[SnowpackElement[i]].theta[WATER]=theta_d[i]-(EMS[SnowpackElement[i]].theta[ICE]*(Constants::density_ice/Constants::density_water)); - EMS[SnowpackElement[i]].theta[AIR]=1.-EMS[SnowpackElement[i]].theta[WATER]-EMS[SnowpackElement[i]].theta[ICE]-EMS[SnowpackElement[i]].theta[SOIL]; + } /* else { + if (EMS[i].theta[WATER_PREF]0) { + // Bottom element remains unchanged, add all elements above accordingly. 
Note that this assumes that the outer loop is executed bottom to top + EMS[i].h = h_n[i-1] - (.5*EMS[i-1].L + .5*EMS[i].L); + // As partitioning in water and ice is done in enforceThermalEquilibrium(), assume all water for now + EMS[i].theta[WATER] = EMS[i].VG.fromHtoTHETAforICE(EMS[i].h, 0.); + EMS[i].theta[ICE] = 0.; } } + EMS[i].VG.enforceThermalEquilibrium(); + snowpackBACKUPTHETAICE[i]=EMS[i].theta[ICE]; + std::cout << " changed to: h=" << EMS[i].VG.fromTHETAtoHforICE(EMS[i].theta[WATERINDEX], h_d, EMS[i].theta[ICE]) << " theta[WATER]=" << EMS[i].theta[WATER] << " theta[ICE]=" << EMS[i].theta[ICE] << std::endl; } - //Now copy the EMS water content into the working arrays to solve Richards-equation (so this is the important part were this function is coupled to the rest of SNOWPACK). - if(activelayer[i]==true) { - // Now calculate initial pressure head: - h_n[i]=fromTHETAtoHforICE(EMS[SnowpackElement[i]].theta[WATER], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], h_d, theta_i_n[i]); - theta_n[i]=fromHtoTHETAforICE(h_n[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], theta_i_n[i]); //This is the current theta, which we determine from h_n[i]. + if(EMS[i].theta[SOIL]EMS[i].VG.h_e-Constants::eps && EMS[i].h>EMS[i].VG.h_e-Constants::eps && NDS[i].z < Xdata.Seaice->SeaLevel) { + h_n[i]=EMS[i].h; + } } else { - theta_n[i]=EMS[SnowpackElement[i]].theta[WATER]; - h_n[i]=h_d; + //For soil, we take the matrix pressure head as lower boundary for the snow preferential flow + theta_i_n[i]=EMS[i].theta[ICE]; + if(matrix == false) theta_i_n[i]=EMS[i].theta[ICE]; // Keep ice contents in sync, as we skip soil freezing with preferential flow (in turn because we suppress preferential flow in soil) + h_n[i]=EMS[i].VG.fromTHETAtoHforICE(EMS[i].theta[WATER], h_d, theta_i_n[i]); } + theta_n[i]=EMS[i].VG.fromHtoTHETAforICE(h_n[i], theta_i_n[i]); //This is the current theta, which we determine from h_n[i]. 
//Determine source/sink term - s[i]=0.; //Reset source/sink term - - //Add wateroverflow (Remember: units wateroverflow [m^3/m^3]): - if (alpine3d==false && (wateroverflow[i]>0 || SafeMode==false)) { //In SafeMode, we don't allow the negative wateroverflow to be used as sink term, as a negative wateroverflow is caused by initialization of very dry snow layers, so the sink term would basically be a sink term in very dry conditions, which is numerically unstable. - if(i==uppernode) { - surfacefluxrate+=(wateroverflow[i]*dz[i])/sn_dt; - wateroverflow[i]=0.; - } else { - if((wateroverflow[i]*dz[i])/sn_dt < ksat[i+1]) { //Check if influx is not too large - s[i]+=wateroverflow[i]/sn_dt; //These terms mainly are caused by merging elements, where the mass of the upper element is added to the lower one. This can lead to too much water in a certain element. We add this as a source term. - wateroverflow[i]=0.; //Since we have put wateroverflow in source/sink term, it's not an overflow anymore for this layer. - } else { //Else limit influx and throw other water away... So this is a water hole in the model. I suggest making a variable MS_LATERALRUNOFF to track this water. - s[i]+=(ksat[i]/dz[i]); - wateroverflow[i]-=(ksat[i]/dz[i])*sn_dt; - } - } - } + s[i]=0.; //Reset source/sink term //Now add soilsurfacesourceflux (in case RemoveElements removed the lowest snow element): - if(soilsurfacesourceflux>0. && i==nsoillayers_richardssolver) { //We assign source flux in the lowest snow element if the source flux is >0. This can only be the case when we use RE for snow, so we don't have to check for this. + if(soilsurfacesourceflux>0. && i==Xdata.SoilNode) { //We assign source flux in the lowest snow element if the source flux is >0. This can only be the case when we use RE for snow, so we don't have to check for this. //Remember: soilsurfacesourceflux=[m^3/m^2/s] - s[i]+=soilsurfacesourceflux/dz[i]; //Soilsurfacesourceflux>0. 
if we remove the first snow element above the soil AND there are more snow layers (else it is a surfaceflux) AND we use RE for snow. + s[i]+=soilsurfacesourceflux/dz[i]; //Soilsurfacesourceflux>0. if we remove the first snow element above the soil AND there are more snow layers (else it is a surfaceflux) AND we use RE for snow. } //Add source/sink term from other parts of SNOWPACK (in particular Canopy.cc) s[i]+=EMS[i].lwc_source/sn_dt; EMS[i].lwc_source=0.; // Now that we used the variable, reset it. - - //To now the flux of water in/out of the model domain due to the source/sink term. - totalsourcetermflux+=s[i]*dz[i]; } //Initialize upper boundary in case of Dirichlet - if(TopBC==DIRICHLET) { + if (TopBC==DIRICHLET) { aTopBC=DIRICHLET; htop=h_n[uppernode]; } + // Initialize upper boundary for evaporation + surfacefluxrate += (ql/Constants::lh_vaporization)/Constants::density_water; + Sdata.mass[SurfaceFluxes::MS_EVAPORATION] += ql*sn_dt/Constants::lh_vaporization; + if(Xdata.swe < Constants::eps2) { + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += ql*sn_dt/Constants::lh_vaporization; + } + ql = 0.; //We dealt with ql, so set it to 0, only to be possibly modified at the end of the function. + + //Important: We have to be aware that the previous time step may be too large for the infiltration flux in the current time step. Then, too much of the infiltration flux may be rejected. + // Two mechanisms to prevent this are: provide a better estimate of the necessery time step (done here), and try trigger a rewind with smaller time step first, before limiting the infilitration flux (done later). + if((TopBC == LIMITEDFLUXINFILTRATION || TopBC == LIMITEDFLUX) && (TopFluxRate>0.) 
&& ( + (LIMITEDFLUXINFILTRATION_soil==true && Xdata.SoilNode==nE) + || (LIMITEDFLUXINFILTRATION_snowsoil==true && Xdata.SoilNodeupdateFreeboard(Xdata); + } + aBottomBC=DIRICHLET; + hbottom=std::min((Xdata.Seaice->SeaLevel - NDS[lowernode].z - .5 * EMS[lowernode].L), NDS[uppernode+1].z); // Keep hbottom smaller than depth of simulation domain. + h_n[lowernode]=hbottom; + EMS[lowernode].salinity += SeaIce::OceanSalinity * (EMS[lowernode].VG.fromHtoTHETAforICE(h_n[lowernode], theta_i_n[lowernode]) - EMS[lowernode].theta[WATER]); + EMS[lowernode].theta[WATER] = theta_n[lowernode] = EMS[lowernode].VG.fromHtoTHETAforICE(h_n[lowernode], theta_i_n[lowernode]); + EMS[lowernode].updDensity(); } + SalinityTransport Salinity(nE); //Note: there are 2 iterations. First, the iteration starts to match the Richards solver time step to the SNOWPACK time step. Simple example: assume SNOWPACK time step is 15 minutes and //Richards solver time step is 1 minute, there should be 15 iterations to match the solution to the SNOWPACK time step. @@ -1414,20 +1140,19 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) //Determine mass at beginning of snowpack time step. mass1=0.; - for (i = uppernode; i >= lowernode; i--) { + for (i = lowernode; i <= uppernode; i++) { mass1+=(theta_n[i]+(theta_i_n[i]*(Constants::density_ice/Constants::density_water)))*dz[i]; } - do - { + do { if(DoRewindFlag==false) { //Only if we are not doing a rewind, we should increase the number of steps (else it basically is the same time step). nsteps++; //Increase the number of steps niter_nrewinds=0; //Reset rewind counter } Xdata.ReSolver_dt=dt; //Store the last used time step. - if ((TimeAdvance+dt)>=snowpack_dt) { //If our time step is so large that the integrated time step will exceed the SNOWPACK time step, we limit the dt for the current time step... - dt=snowpack_dt-TimeAdvance; //...so it matches exactly the SNOWPACK time step. 
+ if ((TimeAdvance+dt)>=sn_dt) { //If our time step is so large that the integrated time step will exceed the SNOWPACK time step, we limit the dt for the current time step... + dt=sn_dt-TimeAdvance; //...so it matches exactly the SNOWPACK time step. StopLoop=true; //And we set the switch to stop the Richards solver. } TimeAdvance+=dt; //Update the total time in this time step. This variable is used to match SNOWPACK time steps. @@ -1442,7 +1167,8 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) //Initialize values for the first iteration (iteration m) - for (i = uppernode; i >= lowernode; i--) { + i = uppernode + 1; + while (i-- > lowernode) { // Note, it is not possible to do an educated guess. The guess should be mass-conservative, which is very difficult to achieve. h_np1_m[i]=h_n[i]; theta_np1_m[i]=theta_n[i]; @@ -1450,10 +1176,9 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) } //Write out initial water content - if ((WriteOutNumerics_Level1==true && nsteps==1 && niter_nrewinds==0 ) || (WriteOutNumerics_Level2==true)) { - for (i = uppernode; i >= lowernode; i--) { - const string is_active = (activelayer[i])? "true" : "false" ; - std::cout << "ITER: " << niter << " i: " << i << " active? 
" << is_active << std::setprecision(15) << "; h_n: " << h_n[i] << " (h_np1: " << h_np1_m[i] << ") theta: " << theta_n[i] << std::setprecision(6) << "(" << theta_r[i] << "-" << theta_s[i] << ") ice: " << EMS[SnowpackElement[i]].theta[ICE] << "/" << theta_i_n[i] << " (vg_params: " << alpha[i] << " " << m[i] << " " << n[i] << ")\n"; + if (WriteDebugOutput) { + for (i = lowernode; i <= uppernode; i++) { + std::cout << "ITER: " << niter << " i: " << i << std::setprecision(15) << "; h_n: " << h_n[i] << " (h_np1: " << h_np1_m[i] << ") theta: " << theta_n[i] << std::setprecision(6) << "(" << EMS[i].VG.theta_r << "-" << EMS[i].VG.theta_s << ") ice: " << EMS[i].theta[ICE] << "/" << theta_i_n[i] << " (vg_params: " << EMS[i].VG.alpha << " " << EMS[i].VG.m << " " << EMS[i].VG.n << ") s[i]: " << s[i] << "\n"; } } @@ -1462,69 +1187,59 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) while (boolConvergence==false && DoRewindFlag==false) { //In theory, this can create an endless loop, but for this, I put a throw in the code when no convergence is achieved, because then the situation is hopeless anyway. niter++; - // niter_snowpack_dt++; + niter_snowpack_dt++; memstate++; int solver_result=0; //Prepare matrices //Update state properties - for (i = uppernode; i >= lowernode; i--) { - if(activelayer[i]==true) { - //Calculate theta from h - theta_np1_m[i]=fromHtoTHETAforICE(h_np1_m[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], theta_i_np1_m[i]); - - //Determine Se - if(h_np1_m[i]0. && K[i]>0. ) { //Only for soil and when there is ice in the soil - //q=theta_i_np1_m[i]/(theta_s[i]-theta_r[i]); //This is how Dall'Amico presents it, but it is based on Hanssen (2004), who defines it as: - const double q = (theta_i_np1_m[i]*(Constants::density_ice/Constants::density_water))/((theta_np1_m[i]+(theta_i_np1_m[i]*(Constants::density_ice/Constants::density_water)))-theta_r[i]); //Hanssen (2004). 
- impedance[i]=pow(10., -1.*omega*q); - } else { - impedance[i]=1.; - } + //Applying ice impedance on K when requested + if (K_frozen_soilType != IGNORE && i < Xdata.SoilNode && theta_i_np1_m[i]>0.) { //Only for soil and when there is ice in the soil + if (K_frozen_soilType==OMEGA) { + //See Zhao et al. (1997) and Hansson et al. (2004) [Dall'Amicao, 2011]. + //q=theta_i_np1_m[i]/(EMS[i].VG.theta_s-EMS[i].VG.theta_r); //This is how Dall'Amico presents it, but it is based on Hanssen (2004), who defines it as: + const double q = (theta_i_np1_m[i]*(Constants::density_ice/Constants::density_water))/((theta_np1_m[i]+(theta_i_np1_m[i]*(Constants::density_ice/Constants::density_water)))-EMS[i].VG.theta_r); //Hanssen (2004). + impedance[i]=pow(10., -1.*omega*q); K[i]*=impedance[i]; + } else if (K_frozen_soilType==LIQUIDPORESPACE) { + const double tmp_Se = ((theta_np1_m[i] - EMS[i].VG.theta_r)/(EMS[i].VG.theta_s - EMS[i].VG.theta_r)); + K[i]=EMS[i].VG.ksat*sqrt(tmp_Se)*pow((1.-(pow(1.-pow(tmp_Se*EMS[i].VG.Sc,(1./EMS[i].VG.m)),EMS[i].VG.m)))/(1.-pow(1.-pow(EMS[i].VG.Sc,(1./EMS[i].VG.m)), EMS[i].VG.m)),2.); } + } - //Calculate the specific moisture capacity (which is derivative d.theta/d.h) - if(Se[i]<1.) 
{ //No saturation - C[i]=alpha[i]*n[i]*m[i]*((theta_s[i]-theta_r[i])/Sc[i])*(pow((alpha[i]*fabs(h_np1_m[i])), (n[i]-1.)))*(pow(1.+pow((alpha[i]*fabs(h_np1_m[i])), n[i]), (-1.*m[i]-1.))); - if(std::isnan(C[i])) solver_result=-1; - } else { //Saturation - C[i]=0.; - } - } else { //If not an active layer - K[i]=0.; + //Calculate the specific moisture capacity (which is derivative d.theta/d.h) + if(h_np1_m[i]>EMS[i].VG.h_e) { C[i]=0.; + } else { + C[i]=EMS[i].VG.dtheta_dh(std::min(h_np1_m[i], EMS[i].VG.h_e)); + if(std::isnan(C[i])) solver_result=-1; } - if(WriteOutNumerics_Level3==true) std::cout << "HYDPROPS: i=" << i << std::scientific << " Se=" << Se[i] << " C=" << C[i] << " K=" << K[i] << ".\n" << std::fixed; + + //Update liquid density and brine salinity + rho[i] = Constants::density_water + ((Xdata.Seaice!=NULL)?(SeaIce::betaS * EMS[i].salinity):(0.)); + + if(WriteDebugOutput) std::cout << "HYDPROPS: i=" << i << std::scientific << " Se=" << Se[i] << " C=" << C[i] << " K=" << K[i] << " rho=" << rho[i] << " sal=" << EMS[i].salinity << ".\n" << std::fixed; } for (i = lowernode; i <= uppernode; i++) { //Determine K at interface nodes // 1) Determine k_np1_m_ip12 - if (i!=uppernode && activelayer[i+1]==true) { + if (i!=uppernode) { //For the rest of the domain, we might have heterogeneous soils, so we have to approximate the hydraulic conductivity at the interface nodes. switch (K_AverageType) { @@ -1534,6 +1249,19 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) break; } + case LOGMEAN: + { + // See: https://doi.org/10.1061/9780784480472.078 + if (K[i]==K[i+1]) { + k_np1_m_ip12[i]=K[i]; + } else if (K[i]==0. || K[i+1]==0.) 
{ + k_np1_m_ip12[i]=0.; + } else { + k_np1_m_ip12[i]=(K[i+1]-K[i])/log(K[i+1]/K[i]); + } + break; + } + case GEOMETRICMEAN: { k_np1_m_ip12[i]=sqrt(K[i]*K[i+1]); @@ -1562,37 +1290,37 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) case UPSTREAM: { - if (((h_np1_m[i+1]-h_np1_m[i])/dz_down[i+1]) - cos_sl > 0.) { + if (((h_np1_m[i+1]-h_np1_m[i])/dz_down[i+1]) - Xdata.cos_sl > 0.) { k_np1_m_ip12[i]=K[i]; } else { k_np1_m_ip12[i]=K[i+1]; } break; } + default: + InvalidArgumentException("Unknown K_AverageType value provided", AT); + + } + if(matrix==false) { + // When solving preferential flow, we suppress liquid water flow in soil by setting hydraulic conductivity to 0. + if(i0.) && ( - (LIMITEDFLUXINFILTRATION_soil==true && int(nsoillayers_snowpack)==int(nE)) - || (LIMITEDFLUXINFILTRATION_snowsoil==true && int(nsoillayers_snowpack)0.) && ( + (LIMITEDFLUXINFILTRATION_soil==true && Xdata.SoilNode==nE) + || (LIMITEDFLUXINFILTRATION_snowsoil==true && Xdata.SoilNode0) ? k_np1_m_im12[uppernode]*(((h_np1_m[uppernode]-h_np1_m[uppernode-1])/dz_down[uppernode]) + cos_sl) : 0.); // plus what could leave below - - // For alpine3d simulations, we are stricter for the sake of stability: we also don't allow a positive influx when there is ponding inside the model domain: - if (alpine3d==true) { - bool isPonding=false; - for(int jj=lowernode; jj<=uppernode; jj++) { - if(h_np1_m[jj]>h_e[jj]) isPonding=true; + // Determine the limiting flux, which is the flux that would fill the upper element: + const double flux_compare = (dz[uppernode]*(EMS[uppernode].VG.theta_s - (theta_np1_m[uppernode] + theta_i_np1_m[uppernode]))/dt); + if((0.999*flux_compare) < TopFluxRate) { //If prescribed flux is too large: + if(dt>MIN_DT_FOR_INFILTRATION) { //Trigger rewind when the top layer cannot accomodate for all infiltrating flux + solver_result=-1; + } else { //Limit flux. 
Note: we multiply flux_compare with 0.999 because flux_compare can be + TopFluxRate=std::max(0., (0.999*flux_compare)); //regarded as the asymptotic case from which we want to stay away a little. } - if(isPonding==true) TopFluxRate=0.; - } - - if((0.999*flux_compare) < TopFluxRate) { //Limit flux if necessary. Note: we multiply flux_compare with 0.999 because flux_compare can be - TopFluxRate=std::max(0., (0.999*flux_compare)); //regarded as the asymptotic case from which we want to stay away a little. } } - if((TopBC == LIMITEDFLUXEVAPORATION || TopBC == LIMITEDFLUX) && (TopFluxRate<0.) && ((LIMITEDFLUXEVAPORATION_soil==true && (int(nsoillayers_snowpack)==int(nE) || toplayer==nsoillayers_snowpack)) || (LIMITEDFLUXEVAPORATION_snow==true && int(nsoillayers_snowpack) TopFluxRate) { TopFluxRate=std::min(0., flux_compare); } @@ -1661,35 +1381,43 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) BottomFluxRate=0.; //Dirichlet BC, so no prescribed flux. theta_np1_m[lowernode]=theta_n[lowernode]; } else if (BottomBC==WATERTABLE) { + // Default water table aBottomBC=DIRICHLET; //Water table is a Dirichlet BC. - BottomFluxRate=0.; //Dirichlet BC, so no prescribed flux. + h_n[lowernode]=EMS[lowernode].VG.h_e; + hbottom=h_n[lowernode]; theta_np1_m[lowernode]=theta_n[lowernode]; + BottomFluxRate=0.; //Dirichlet BC, so no prescribed flux. } else if (BottomBC==NEUMANN) { aBottomBC=NEUMANN; //Set Neumann BC. //Note: BottomFluxRate is defined as gradient over pressure head. For outflux (drainage), pressure head is increasing with increasing height, so BottomFluxRate is positive. BottomFluxRate=0.0000005; //Flux for Neumann BC. } else if (BottomBC==FREEDRAINAGE) { - //First calculate flux between lowest and lowest+1 element. - const double tmpgrad=((h_np1_m[lowernode+1]-h_np1_m[lowernode])/dz_up[lowernode]); //Note: flux would be (tmpgrad * K). - if((tmpgrad+cos_sl) < 0.) 
{ - //In this case, we would create influx at lower boundary, which does not work with FREEDRAINAGE. - //Then set zero flux: - aBottomBC=NEUMANN; - BottomFluxRate=0.; + aBottomBC=NEUMANN; + if(uppernode>0) { + //First calculate flux between lowest and lowest+1 element. + const double tmpgrad=((h_np1_m[lowernode+1]-h_np1_m[lowernode])/dz_up[lowernode]); //Note: flux would be (tmpgrad * K). + if((tmpgrad+Xdata.cos_sl) < 0.) { + //In this case, we would create influx at lower boundary, which does not work with FREEDRAINAGE. + //Then set zero flux: + BottomFluxRate=0.; + } else { + //Now, prescribe flux at lower boundary equivalent to tmpgrad + BottomFluxRate=(tmpgrad+Xdata.cos_sl)*k_np1_m_im12[lowernode]; + } } else { - aBottomBC=NEUMANN; - //Now, prescribe flux at lower boundary equivalent to tmpgrad - BottomFluxRate=(tmpgrad+cos_sl)*k_np1_m_im12[lowernode]; + //With one element only, fall back to GRAVITATIONALDRAINAGE + BottomFluxRate=k_np1_m_im12[lowernode]; } } else if (BottomBC==SEEPAGEBOUNDARY) { //Neumann with flux=0 in case of unsaturated - //Dirichlet with h_bottom=0 in case of saturated - if(h_n[lowernode+1]<0.) { + //Dirichlet with h_bottom=h_e in case of saturated + if(h_n[lowernode]updateFreeboard(Xdata); + hbottom=std::min((Xdata.Seaice->SeaLevel - NDS[lowernode].z - .5 * EMS[lowernode].L), NDS[uppernode+1].z); // Keep hbottom smaller than depth of simulation domain. + } + h_n[lowernode] = hbottom; + BottomFluxRate=0.; + theta_np1_m[lowernode]=theta_n[lowernode]; } else if (BottomBC==LIMITEDFLUX) { //Probably also not necessary. std::cout << "ERROR in ReSolver1d.cc: No implementation for LIMITEDFLUX lower boundary condition. 
Either choose a saturated DIRICHLET (lower boundary in water table), or choose GRAVITATIONAL or FREEDRAINAGE (lower boundary not in water table).\n"; @@ -1710,65 +1453,119 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) } - if (WriteOutNumerics_Level2==true) + if (WriteDebugOutput) std::cout << "BOUNDARYTOPFLUX: [ BC: " << TopBC << "] " << std::scientific << TopFluxRate << " " << surfacefluxrate << " " << theta_n[lowernode] << " " << K[lowernode] << " " << ((h_np1_mp1[lowernode])+(((TopFluxRate/k_np1_m_im12[lowernode])-1.)*dz_down[lowernode])) << " " << h_np1_mp1[lowernode] << " " << k_np1_m_im12[lowernode] << " " << (TopFluxRate/k_np1_m_im12[lowernode]) << "\n" << std::fixed; - if (WriteOutNumerics_Level2==true) - std::cout << "NUMERICS: BCTOP: " << TopBC << std::scientific << " TOPFLUXRATE = " << TopFluxRate << " SURFACEFLUXRATE = " << surfacefluxrate << "\n" << std::fixed; + // Verify source/sink term + totalsourcetermflux=0.; + for (i = lowernode; i <= uppernode; i++) { + if(s[i] != 0.) { + if(LIMITEDFLUXSOURCESINKTERM==true) { + const double tmp = s[i]; + // Determine the limiting influx: + const double flux_compare_max = //The limiting flux is (positive is inflow): + std::max(0., (dz[i]*(EMS[i].VG.theta_s - (theta_np1_m[i] + theta_i_np1_m[i]))/dt) // net flux that would lead to saturation of the layer + + ((i>lowernode) ? k_np1_m_im12[i]*(((h_np1_m[i]-h_np1_m[i-1])/dz_down[i]) + Xdata.cos_sl) : BottomFluxRate) // plus what could leave below + - ((ilowernode) ? 
k_np1_m_im12[i]*(((h_np1_m[i]-h_np1_m[i-1])/dz_down[i]) + Xdata.cos_sl) : BottomFluxRate) // minus what will leave below + + ((iConstants::eps2) { + if(dt>MIN_DT_FOR_INFILTRATION) { //Trigger rewind when the layer cannot accomodate for the source/sink term + s[i]=tmp; //Reset source/sink term to original value + solver_result=-1; //Trigger rewind + } else { + std::cout << "[W] [" << date.toString(mio::Date::ISO) << "] ReSolver1d.cc: for layer " << i << ", prescribed source/sink term could not be applied with dt=" << dt << ". Term reduced from " << tmp << " to " << s[i] << ".\n"; + } + } + } - if (niter==1) { - if (int(nsoillayers_snowpack) 0 means influx! - snowsoilinterfaceflux_before=((((h_n[nsoillayers_richardssolver]-h_n[nsoillayers_richardssolver-1])/dz_up[nsoillayers_richardssolver-1])+cos_sl)*k_np1_m_ip12[nsoillayers_richardssolver-1]*dt); + //Update now the flux of water in/out of the model domain due to the source/sink term. + totalsourcetermflux+=s[i]*dz[i]; } } - //Solve equation - std::fill(ainv.begin(), ainv.end(), 0.); //This is very important: with inverting the matrix, it may become non-tridiagonal! So we have to explicitly set its elements to 0, because some of the for-loops only touch the tridiagonal part of the matrix. - for (i = uppernode; i >= lowernode; i--) { - j=i; //As matrix A is tridiagonal, so it can be filled very efficiently. However, I keep the notation of i and j, so it's better understood how the structure of A is. I only evaluate i==j. - //This part is for the DGESVD/DGESDD solver, which uses full matrix a (ainv). We always need them, because in case DGTSV fails, we should be able to fall back on DGESVD/DGESDD: - if(i==j) { - //Set up the matrix diagonal - ainv[j*(uppernode+1)+i]=(1./dt)*C[i]; - - //The following two lines assume Neumann boundary conditions (for upper and lowernode, one of the terms drop out). If Dirichlet is used, this will be corrected later. 
- if(i!=lowernode) ainv[j*(uppernode+1)+i]+=(1./(dz_[i]))*((k_np1_m_im12[i]/(dz_down[i]))); - if(i!=uppernode) ainv[j*(uppernode+1)+i]+=(1./(dz_[i]))*((k_np1_m_ip12[i]/(dz_up[i]))); - - //Correct diagonal in case of Dirichlet - if(aTopBC==DIRICHLET && i==uppernode) { - ainv[i*(uppernode+1)+i]=1.; - } - if(aBottomBC==DIRICHLET && i==lowernode) { - ainv[i*(uppernode+1)+i]=1.; - } +#ifdef CLAPACK + if(ActiveSolver==DGESVD || AllowSwitchSolver==true) std::fill(ainv.begin(), ainv.end(), 0.); //This is very important: with inverting the matrix, it may become non-tridiagonal! So we have to explicitly set its elements to 0, because some of the for-loops only touch the tridiagonal part of the matrix. +#endif + for (i = lowernode; i <= uppernode; i++) { + j=i; //As matrix A is tridiagonal, it can be filled very efficiently. The notation of i and j is kept for clarity of the structure of A. However, only evaluating when i==j is required. - //Set up the matrix upper and lower diagonals - if(i!=lowernode) ainv[i*(uppernode+1)+(i-1)]=(-1./(dz_[i]))*((k_np1_m_im12[i]/(dz_down[i]))); - if(i!=uppernode) ainv[i*(uppernode+1)+(i+1)]=(-1./(dz_[i]))*((k_np1_m_ip12[i]/(dz_up[i]))); - //Correct upper and lower diagonals in case of Dirichlet - if(aTopBC==DIRICHLET && i==uppernode) { - ainv[(i-1)*(uppernode+1)+i]=0.; - ainv[i*(uppernode+1)+(i-1)]=0.; - } - if(aBottomBC==DIRICHLET && i==lowernode) { - ainv[(i+1)*(uppernode+1)+i]=0.; - ainv[i*(uppernode+1)+(i+1)]=0.; + // Calculate density related variables + double rho_up = 0; + double rho_down = 0; + if(i==uppernode) { + rho_up = 0.5 * (rho[i] + Constants::density_water); + } else { + rho_up = 0.5 * (rho[i] + rho[i+1]); + } + if(i==lowernode) { + rho_down = 0.5 * (rho[i] + Constants::density_water + ((Xdata.Seaice!=NULL)?(SeaIce::betaS * Xdata.Seaice->OceanSalinity):(0.))); + } else { + rho_down = 0.5 * (rho[i] + rho[i-1]); + } + + +#ifdef CLAPACK + //This part is for the DGESVD/DGESDD solver, which uses full matrix inversion of matrix A 
(ainv). + if(ActiveSolver==DGESVD || AllowSwitchSolver==true) { + // For DGESVD, i indexes rows, j index columns, both starting at 0. + if(i==j) { + size_t LDA=(uppernode+1); + size_t i_d=j*LDA+i; // The index for the main diagonal + size_t i_u=(j+1)*LDA+i; // The index for the upper diagonal + size_t i_l=(j-1)*LDA+i; // The index for the lower diagonal + + //Set up the matrix diagonal + ainv[i_d]=(1./dt)*(C[i]/rho[i]); + + //The following two lines assume Neumann boundary conditions (for upper and lowernode, one of the terms drop out). If Dirichlet is used, this will be corrected later. + if(i!=lowernode) ainv[i_d]+=(1./dz_[i])*(k_np1_m_im12[i]/rho_down/dz_down[i]); + if(i!=uppernode) ainv[i_d]+=(1./dz_[i])*(k_np1_m_ip12[i]/rho_up/dz_up[i]); + + //Correct diagonal in case of Dirichlet + if(aTopBC==DIRICHLET && i==uppernode) { + ainv[i_d]=1.; + } + if(aBottomBC==DIRICHLET && i==lowernode) { + ainv[i_d]=1.; + } + + //Set up the matrix upper and lower diagonals + if(i!=lowernode) ainv[i_l]=(-1./dz_[i])*(k_np1_m_im12[i]/rho_down/dz_down[i]); + if(i!=uppernode) ainv[i_u]=(-1./dz_[i])*(k_np1_m_ip12[i]/rho_up/dz_up[i]); + + //Correct upper and lower diagonals in case of Dirichlet + if(aTopBC==DIRICHLET && i==uppernode) { + ainv[i_l]=0.; + } + if(aBottomBC==DIRICHLET && i==lowernode) { + ainv[i_u]=0.; + } + + // Prevent degenerate case + if(ainv[i_d]==0. && ainv[i_l]==0. && ainv[i_u]==0.) { + ainv[i_d]=1.; + } } } +#endif //This part is for the DGTSV or TDMA solver, that uses the fact that A is a tridiagonal matrix, so we only have to specify the diagonals and subdiagonals. - if(ActiveSolver==DGTSV || ActiveSolver==TDMA ) { + if(ActiveSolver==DGTSV || ActiveSolver==TDMA) { if(i==j) { //Set up the matrix diagonal - ad[i]=(1./dt)*C[i]; + ad[i]=(1./dt)*(C[i]/rho[i]); //The following two lines assume Neumann boundary conditions (for upper and lowernode, one of the terms drop out). If Dirichlet is used, this will be corrected later. 
- if(i!=lowernode) ad[i]+=(1./(dz_[i]))*((k_np1_m_im12[i]/(dz_down[i]))); - if(i!=uppernode) ad[i]+=(1./(dz_[i]))*((k_np1_m_ip12[i]/(dz_up[i]))); + if(i!=lowernode) ad[i]+=(1./dz_[i])*(k_np1_m_im12[i]/rho_down/dz_down[i]); + if(i!=uppernode) ad[i]+=(1./dz_[i])*(k_np1_m_ip12[i]/rho_up/dz_up[i]); //Correct diagonal in case of Dirichlet if(aTopBC==DIRICHLET && i==uppernode) { @@ -1779,79 +1576,44 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) } //Set up the matrix upper and lower diagonals - if(i!=lowernode) adl[i-1]=-(1./(dz_[i]))*((k_np1_m_im12[i]/(dz_down[i]))); - if(i!=uppernode) adu[i]=-(1./(dz_[i]))*((k_np1_m_ip12[i]/(dz_up[i]))); + if(i!=lowernode) adl[i-1]=-(1./dz_[i])*(k_np1_m_im12[i]/rho_down/dz_down[i]); + if(i!=uppernode) adu[i]=-(1./dz_[i])*(k_np1_m_ip12[i]/rho_up/dz_up[i]); //Correct diagonals in case of Dirichlet if(aTopBC==DIRICHLET && i==uppernode) { - adu[i-1]=0.; adl[i-1]=0.; } if(aBottomBC==DIRICHLET && i==lowernode) { adu[i]=0.; - adl[i]=0.; } - } - } - - //We copy here the matrix to the ainv, which is passed to the SVD-routine later on. This ainv is altered externally, that's why we need a copy. - //ainv[j*(uppernode+1)+i]=a[i][j]; - //Determine R: - term_up[i]=0.; - term_down[i]=0.; - - //Fill R.H.S. vector - //Note: the gravity term is not explicitly in Celia et al (1990). It is just z[i], as pressure head should already be scaled by rho_water * g. Then it is taken outside the nabla, by using the chain rule. - if(i==uppernode) { - if(aTopBC==NEUMANN) { //Neumann, following Equation 4 in McCord, WRR (1991). 
- term_up[i]=(TopFluxRate)*dz_up[i] - cos_sl*(dz_up[i]*k_np1_m_ip12[i]); - } else { //Dirichlet - term_up[i]=0.; - } - } else { - if(activelayer[i+1]==true) { - term_up[i]=k_np1_m_ip12[i]*(h_np1_m[i+1]-h_np1_m[i]); - } else { - //Analogue to Neumann at top: - term_up[i]=(0.)*dz_up[i] - cos_sl*(dz_up[i]*k_np1_m_ip12[i]); - } - } - if(i==lowernode) { - if(aBottomBC == NEUMANN) { //Neumann, following Equation 4 in McCord, WRR (1991). - term_down[i]=(BottomFluxRate)*dz_down[i] - cos_sl*(dz_down[i]*k_np1_m_im12[i]); - } else { //Dirichlet - term_down[i]=0.; - } - } else { - if(activelayer[i-1]==true) { - term_down[i]=k_np1_m_im12[i]*(h_np1_m[i]-h_np1_m[i-1]); - } else { - //Analogue to Neumann at top: - term_down[i]=(0.)*dz_down[i] - cos_sl*(dz_down[i]*k_np1_m_im12[i]); + // Prevent degenerate case + if(ad[i]==0. && adl[i-1]==0. && adu[i]==0.) { + ad[i]=1.; + } } } + } - //RHS eq. 17 in Celia et al. (1990): - r_mpfd[i]=(1./(dz_[i]))*((term_up[i]/dz_up[i])-(term_down[i]/dz_down[i])) + cos_sl*((k_np1_m_ip12[i]-k_np1_m_im12[i])/(dz_[i])) - (1./dt)*((theta_np1_m[i]-theta_n[i]) + (theta_i_np1_m[i]-theta_i_n[i])*(Constants::density_ice/Constants::density_water)) + s[i]; - - // r_mpfd is an approximation of how far one is away from the solution. So in case of Dirichlet boundaries, we are *at* the solution: - if(aTopBC==DIRICHLET) r_mpfd[uppernode]=0.; - if(aBottomBC==DIRICHLET) r_mpfd[lowernode]=0.; - - r_mpfd2[i]=r_mpfd[i]; // We make a copy for use with DGTSV and TDMA solvers. 
- if(WriteOutNumerics_Level3==true) { - std::cout << "SOLVER: i=" << i << std::scientific << " - r_mpfd=" << r_mpfd[i] << " term_up=" << term_up[i] << " term_down=" << term_down[i] << " a=" << ainv[i*(uppernode+1)+i]/*a[i][i]*/ << "adl=" << adl[i] << " adu=" << adu[i] << " [" << K[i] << " - " << C[i] << "]\n" << std::fixed; + r_mpfd = AssembleRHS(lowernode, uppernode, h_np1_m, theta_n, theta_np1_m, theta_i_n, theta_i_np1_m, s, dt, rho, k_np1_m_im12, k_np1_m_ip12, aTopBC, TopFluxRate, aBottomBC, BottomFluxRate, Xdata, Salinity, SALINITY_MIXING); + r_mpfd2 = r_mpfd; // We make a copy for use with DGTSV and TDMA solvers. + + // Check stability criterion for salinity transport for sea ice simulations + if(variant=="SEAICE") { + if(SalinityTransportSolver==SalinityTransport::EXPLICIT && Salinity.VerifyCFL(dt)==false) { + printf("CFL failed for dt=%.10f --> reducing time step\n", dt); + solver_result=-1; + } else if((SalinityTransportSolver==SalinityTransport::IMPLICIT || SalinityTransportSolver==SalinityTransport::IMPLICIT2) && Salinity.VerifyImplicitDt(dt)==false) { + printf("ImplicitLimit failed for dt=%.10f --> reducing time step\n", dt); + solver_result=-1; } } + //Before solving the system of equations, reset convergence tracking variables: track_accuracy_h=0.; track_accuracy_theta=0.; - track_trigger_layer_accuracy=-1; accuracy=-1.; //-1 is a flag. accuracy can only be positive, so when it is negative, we know that no layer did NOT converged yet. - trigger_layer_accuracy=-1; //-1 is a flag. when it is negative, we know that no layer was NOT converged yet. - int trigger_layer_blowup=-1; //-1 is a flag. when it is negative, we know that no layer was NOT converged yet. max_delta_h=0.; boolConvergence=true; //We initialize it as true, and set it to false when necessary. 
mass2=0.; @@ -1861,7 +1623,7 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) if (ActiveSolver==TDMA) { // Note: TDMA is very rapid, but has the problem that when elements in the matrix differ order of magnitudes, rounding errors can occur that destroy accuracy. // For this reason, it is better to use DGTSV solver, which does partial pivoting to prevent this. See: http://en.wikipedia.org/wiki/Pivot_element#Partial_and_complete_pivoting - const int matrixdimensions=(uppernode-lowernode)+1; + const size_t matrixdimensions=(uppernode-lowernode)+1; solver_result=TDMASolver(matrixdimensions, &adl[0], &ad[0], &adu[0], &r_mpfd[0], &r_mpfd2[0]); } @@ -1869,7 +1631,7 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) #ifdef CLAPACK // Solver for Tridiagonal matrices, with partial pivoting. int info=0; - const int matrixdimensions=(uppernode-lowernode)+1; + const int matrixdimensions=int((uppernode-lowernode)+1); // Cast from size_t to int is necessary, to interface correctly with LAPACK dgtsv_. const int vectordimensions=1; dgtsv_( (integer*) &matrixdimensions, (integer*) &vectordimensions, &adl[0], &ad[0], &adu[0], &r_mpfd2[0], (integer*) &matrixdimensions, (integer*) &info ); @@ -1880,10 +1642,10 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) // has not been computed. The factorization has not been // completed unless i = N. if(AllowSwitchSolver==true) { - if(WriteOutNumerics_Level0==true) std::cout << "ERROR in ReSolver1d.cc: DGTSV failed [info = " << info << "]. Trying DGESVD/DGESDD...\n"; + if(WriteDebugOutput) std::cout << "ERROR in ReSolver1d.cc: DGTSV failed [info = " << info << "]. Trying DGESVD/DGESDD...\n"; ActiveSolver=DGESVD; } else { - if(WriteOutNumerics_Level0==true) std::cout << "ERROR in ReSolver1d.cc: DGTSV failed [info = " << info << "]. 
Trying with smaller time step...\n"; + if(WriteDebugOutput) std::cout << "ERROR in ReSolver1d.cc: DGTSV failed [info = " << info << "]. Trying with smaller time step...\n"; solver_result=-1; } } @@ -1895,7 +1657,7 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) if(ActiveSolver==DGESVD) { #ifdef CLAPACK //Do Moore-Penrose matrix inversion, using singular value decomposition (SVD), so we can write: H = A' * R - solver_result=pinv((uppernode-lowernode)+1, (uppernode-lowernode)+1, (uppernode-lowernode)+1, &ainv[0]); + solver_result=pinv(int((uppernode-lowernode)+1), int((uppernode-lowernode)+1), int((uppernode-lowernode)+1), &ainv[0]); #else throw InvalidArgumentException("you cannot use solver DGESVD when libraries BLAS and LAPACK are not installed. Either install these libraries, or choose solver TDMA", AT); #endif @@ -1907,12 +1669,12 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) //This is a little bit complicated. The problem started when we did soil freezing. If then suddenly an isnan is detected somewhere in the model domain, some part of the soil is already through the phasechange function, other parts not (maybe). //It is difficult to revert this soil freezing, so therefore, we need first to loop over i to determine the complete solution vector delta_h, and then an other loop over i to apply the new solution. //However, if a proper way to revert soil freezing is made, this extra loop can be removed. - for (i = uppernode; i >= lowernode; i--) { + for (i = lowernode; i <= uppernode; i++) { //Determine delta h: if(ActiveSolver==DGESVD) { delta_h[memstate%nmemstates][i]=0.; //Note: after inverting, ainv is non tridiagonal, so we have to loop over all elements. 
- for (k = uppernode; k >= lowernode; k--) { + for (k = lowernode; k <= uppernode; k++) { //for (k = std::min(uppernode, i+1); k >= std::max(lowernode, i-1); k--) { delta_h[memstate%nmemstates][i]+=ainv[i*(uppernode+1)+k]*r_mpfd[k]; } @@ -1922,6 +1684,7 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) if(std::isnan(delta_h[memstate%nmemstates][i])==true || std::isinf(delta_h[memstate%nmemstates][i])==true) { solver_result=-1; } + delta_h[memstate%nmemstates][i]/=rho[i]; } } @@ -1938,222 +1701,231 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) delta_theta[lowernode]=0.; } - for (i = uppernode; i >= lowernode; i--) { - if(activelayer[i]==true) { - //Keep track of the maximum delta h, to detect possible model blow-ups. - if(fabs(delta_h[memstate%nmemstates][i])>max_delta_h) { // If change is too big and we are allowed to do a rewind, don't check for accuracy - //delta_h[memstate%nmemstates][i]=0.; - trigger_layer_blowup=i; - max_delta_h=fabs(delta_h[memstate%nmemstates][i]); - h_np1_mp1[i]=h_np1_m[i]; - theta_np1_mp1[i]=theta_np1_m[i]; - delta_theta_i[i]=0.; - delta_theta[i]=1E10; //Set delta_theta[i] to any value, to make sure the convergence test will fail. - } - //if(not(max_delta_h>MAX_ALLOWED_DELTA_H) || niter>MAX_ITER+1) { //If it is, there is a big chance the call to fromHtoTHETA will fail because of floating point exceptions (overflows). In this case, we will force a rewind later on, so the solution does not matter anymore. - //The second clause means we cannot increase time step anymore, so we should just try the solution. - if(solver_result!=-1) { - //Apply solution - h_np1_mp1[i]=h_np1_m[i]+delta_h[memstate%nmemstates][i]; - - //Calculate theta - theta_np1_mp1[i]=fromHtoTHETAforICE(h_np1_mp1[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], theta_i_np1_m[i]); - - //Calculate temperature change of soil layers to reflect heat advected by the flowing water - if(i0) ? 
(((((h_np1_m[i]+delta_h[memstate%nmemstates][i])-(h_np1_m[i-1]+delta_h[memstate%nmemstates][i-1]))/dz_up[i-1])+cos_sl)*k_np1_m_ip12[i-1]*dt) : 0; //Units: [m^3/m^2] - - //Calculate intermediate state variables of this layer - const double tmp_theta_air = 1. - theta_i_n[i] - (theta_np1_mp1[i] + (theta_i_np1_m[i]-theta_i_n[i])*(Constants::density_ice/Constants::density_water)) - EMS[SnowpackElement[i]].theta[SOIL]; //Units: [m^3 m^-3] - const double tmp_rho = (Constants::density_ice * theta_i_n[i] + Constants::density_water * (theta_np1_mp1[i] + (theta_i_np1_m[i]-theta_i_n[i])*(Constants::density_ice/Constants::density_water)) + EMS[SnowpackElement[i]].soil[SOIL_RHO] * EMS[SnowpackElement[i]].theta[SOIL]); //Units: [kg m-3] - const double tmp_c_p = (Constants::density_air * tmp_theta_air * Constants::specific_heat_air //Units: [J kg-1 K-1] - + Constants::density_ice * theta_i_n[i] * Constants::specific_heat_ice - + Constants::density_water * (theta_np1_mp1[i] + (theta_i_np1_m[i]-theta_i_n[i])*(Constants::density_ice/Constants::density_water)) * Constants::specific_heat_water - + EMS[SnowpackElement[i]].soil[SOIL_RHO] * EMS[SnowpackElement[i]].theta[SOIL] * EMS[SnowpackElement[i]].soil[SOIL_C] - ) / tmp_rho; - delta_Te_adv_i[i]=0.; - if (tmp_flux_above>0.) { //Positve flux from above (= influx in current layer) - //Advected heat - const double tmp_adv_heat = ((EMS[SnowpackElement[i+1]].Te + delta_Te_adv[i+1] + delta_Te[i+1]) - (EMS[SnowpackElement[i]].Te + delta_Te_adv[i] + delta_Te[i])) * Constants::density_water * tmp_flux_above * Constants::specific_heat_water; //Units [J/m^2] - delta_Te_adv_i[i] = (tmp_adv_heat) / (tmp_c_p * tmp_rho * EMS[SnowpackElement[i]].L); - } - if (tmp_flux_below<0.) 
{ //Negative flux from below (=influx in current layer) - //Advected heat - const double tmp_adv_heat = ((EMS[SnowpackElement[i-1]].Te + delta_Te_adv[i-1] + delta_Te[i-1]) - (EMS[SnowpackElement[i]].Te + delta_Te_adv[i] + delta_Te[i])) * Constants::density_water * (-1.*tmp_flux_below) * Constants::specific_heat_water; //Units [J/m^2] - //In rare cases, we may have inflow from above AND below, so we add (+=) the temperature change due to heat advection - delta_Te_adv_i[i] += (tmp_adv_heat) / (tmp_c_p * tmp_rho * EMS[SnowpackElement[i]].L); - } + if (Xdata.Seaice != NULL && solver_result != -1) { + AssembleRHS(lowernode, uppernode, h_np1_m, theta_n, theta_np1_m, theta_i_n, theta_i_np1_m, s, dt, rho, k_np1_m_im12, k_np1_m_ip12, aTopBC, TopFluxRate, aBottomBC, BottomFluxRate, Xdata, Salinity, SALINITY_MIXING); + if(SalinityTransportSolver==SalinityTransport::EXPLICIT && Salinity.VerifyCFL(dt)==false) { + printf("CFL failed for dt=%.10f @ second time\n", dt); + solver_result=-1; + } + if((SalinityTransportSolver==SalinityTransport::IMPLICIT || SalinityTransportSolver==SalinityTransport::IMPLICIT2) && Salinity.VerifyImplicitDt(dt)==false) { + printf("ImplicitLimit failed for dt=%.10f @ second time\n", dt); + solver_result=-1; + } + } - //Repartition ice/water based on new head - if(AllowSoilFreezing==true) { - size_t BS_iter=0; //Counting the number of iterations - const double hw0=h_np1_mp1[i]; - T_melt[i]=T_0+((Constants::g*T_0)/delF)*hw0; - // Bisection-Secant method, see wikipedia: http://en.wikipedia.org/wiki/False_position_method - // fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*T_melt[i]))*(EMS[SnowpackElement[i]].Te-T_melt[i]), theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i]) - // + - // (theta_i_np1_mp1[i]*(Constants::density_ice/Constants::density_water)) - // - - // fromHtoTHETA(hw0, theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i]); - // = 0. 
- // Solving this equation for theta_i_np1_mp1[i] (which influences theta_np1_mp1 and Te) - - // So the new liquid water content basically is the same equation, but we have to adapt EMS[SnowpackElement[i]].Te to the amount of ice we create (delta_i). - if((theta_i_np1_m[i] > 0. && (EMS[SnowpackElement[i]].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i]) > T_melt[i]) || (EMS[SnowpackElement[i]].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i]) < T_melt[i]) { - - if(WriteOutNumerics_Level2==true) { - const double tmp_T = EMS[SnowpackElement[i]].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_i[i]; - std::cout << "BEFORE [" << i << std::fixed << std::setprecision(15) << "]; theta_w: " << theta_np1_mp1[i] << " theta_i_np1_m: " << theta_i_np1_m[i] << " theta_s: " << theta_s[i] << std::setprecision(3) << " T: " << tmp_T << std::setprecision(8) << " rho: " << tmp_rho << " cp: " << tmp_c_p << " ColdC: " << tmp_rho * tmp_c_p * tmp_T * EMS[SnowpackElement[i]].L << "\n" << std::setprecision(6); - } - //Determine maximum possible change in ice content, which should be between 0, and theta_water > theta_d (all possible water freezes). Then maximum ice content is determined based on the temperature difference between element and T_melt. - //const double max_delta_ice=(std::min((theta_np1_mp1[i]-theta_d[i])*(Constants::density_ice/Constants::density_water), std::max(0., T_melt[i]-(EMS[SnowpackElement[i]].Te/*+delta_Te*/)) * ((EMS[SnowpackElement[i]].c[TEMPERATURE] * EMS[i].Rho) / ( Constants::density_ice * Constants::lh_fusion )))); - double max_delta_ice; - if((EMS[SnowpackElement[i]].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i]) > T_melt[i]) { - // Melt: either all ice will disappear, or a fraction based on available energy - max_delta_ice=-1.*theta_i_n[i]; - } else { - // Freeze: either all available water will freeze, or a fraction based on available energy. 
- max_delta_ice=(theta_np1_mp1[i]-0.)*(Constants::density_water/Constants::density_ice); - } + i = uppernode + 1; + while (i-- > lowernode) { + //Keep track of the maximum delta h, to detect possible model blow-ups. + if(fabs(delta_h[memstate%nmemstates][i])>max_delta_h) { // If change is too big and we are allowed to do a rewind, don't check for accuracy + //delta_h[memstate%nmemstates][i]=0.; + max_delta_h=fabs(delta_h[memstate%nmemstates][i]); + h_np1_mp1[i]=h_np1_m[i]; + theta_np1_mp1[i]=theta_np1_m[i]; + delta_theta_i[i]=0.; + delta_theta[i]=1E10; //Set delta_theta[i] to any value, to make sure the convergence test will fail. + } - bool BS_converged=false; - double ak=0., bk=0., ck=0.; //These are values for changes in ice content. - double delta_Te_ak=0., delta_Te_bk=0., delta_Te_ck=0., delta_w_ak=0., delta_w_bk=0., delta_w_ck=0.; - double ck1=0, delta_Te_ck1=0., delta_w_ck1=0.; - if(max_delta_ice>0.) { - ak=0.; - bk=max_delta_ice; - } else { - ak=max_delta_ice; - bk=0.; + //if(not(max_delta_h>MAX_ALLOWED_DELTA_H) || niter>MAX_ITER+1) { //If it is, there is a big chance the call to fromHtoTHETA will fail because of floating point exceptions (overflows). In this case, we will force a rewind later on, so the solution does not matter anymore. + //The second clause means we cannot increase time step anymore, so we should just try the solution. + if(solver_result!=-1) { + //Apply solution + h_np1_mp1[i]=h_np1_m[i]+delta_h[memstate%nmemstates][i]; + + //Calculate theta + theta_np1_mp1[i]=EMS[i].VG.fromHtoTHETAforICE(h_np1_mp1[i], theta_i_np1_m[i]); + + //Calculate temperature change of soil layers to reflect heat advected by the flowing water + if(ilowernode) ? (((((h_np1_m[i]+delta_h[memstate%nmemstates][i])-(h_np1_m[i-1]+delta_h[memstate%nmemstates][i-1]))/dz_up[i-1])+Xdata.cos_sl)*k_np1_m_ip12[i-1]*dt) : 0; //Units: [m^3/m^2] + + //Calculate intermediate state variables of this layer + const double tmp_theta_air = 1. 
- theta_i_n[i] - (theta_np1_mp1[i] + (theta_i_np1_m[i]-theta_i_n[i])*(Constants::density_ice/Constants::density_water)) - EMS[i].theta[SOIL]; //Units: [m^3 m^-3] + const double tmp_rho = (Constants::density_ice * theta_i_n[i] + Constants::density_water * (theta_np1_mp1[i] + (theta_i_np1_m[i]-theta_i_n[i])*(Constants::density_ice/Constants::density_water)) + EMS[i].soil[SOIL_RHO] * EMS[i].theta[SOIL]); //Units: [kg m-3] + const double tmp_c_p = (Constants::density_air * tmp_theta_air * Constants::specific_heat_air //Units: [J kg-1 K-1] + + Constants::density_ice * theta_i_n[i] * Constants::specific_heat_ice + + Constants::density_water * (theta_np1_mp1[i] + (theta_i_np1_m[i]-theta_i_n[i])*(Constants::density_ice/Constants::density_water)) * Constants::specific_heat_water + + EMS[i].soil[SOIL_RHO] * EMS[i].theta[SOIL] * EMS[i].soil[SOIL_C] + ) / tmp_rho; + delta_Te_adv_i[i]=0.; + if (tmp_flux_above>0.) { //Positve flux from above (= influx in current layer) + //Advected heat + const double tmp_adv_heat = ((EMS[i+1].Te + delta_Te_adv[i+1] + delta_Te[i+1]) - (EMS[i].Te + delta_Te_adv[i] + delta_Te[i])) * Constants::density_water * tmp_flux_above * Constants::specific_heat_water; //Units [J/m^2] + delta_Te_adv_i[i] = (tmp_adv_heat) / (tmp_c_p * tmp_rho * EMS[i].L); + } + if (tmp_flux_below<0.) 
{ //Negative flux from below (=influx in current layer) + //Advected heat + const double tmp_adv_heat = ((EMS[i-1].Te + delta_Te_adv[i-1] + delta_Te[i-1]) - (EMS[i].Te + delta_Te_adv[i] + delta_Te[i])) * Constants::density_water * (-1.*tmp_flux_below) * Constants::specific_heat_water; //Units [J/m^2] + //In rare cases, we may have inflow from above AND below, so we add (+=) the temperature change due to heat advection + delta_Te_adv_i[i] += (tmp_adv_heat) / (tmp_c_p * tmp_rho * EMS[i].L); + } + + //Repartition ice/water based on new head + if(matrix==true) { + unsigned int BS_iter=0; //Counting the number of iterations + const double hw0=std::min(EMS[i].VG.h_e, h_np1_mp1[i]); + EMS[i].meltfreeze_tk=Constants::meltfreeze_tk+((Constants::g*Constants::meltfreeze_tk)/Constants::lh_fusion)*hw0; + // Bisection-Secant method, see wikipedia: http://en.wikipedia.org/wiki/False_position_method + // fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*EMS[i].meltfreeze_tk))*(EMS[i].Te-EMS[i].meltfreeze_tk)) + // + + // (theta_i_np1_mp1[i]*(Constants::density_ice/Constants::density_water)) + // - + // fromHtoTHETA(hw0); + // = 0. + // Solving this equation for theta_i_np1_mp1[i] (which influences theta_np1_mp1 and Te) + + // So the new liquid water content basically is the same equation, but we have to adapt EMS[i].Te to the amount of ice we create (delta_i). + if((theta_i_np1_m[i] > 0. 
&& (EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i]) > EMS[i].meltfreeze_tk) || (EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i]) < EMS[i].meltfreeze_tk) { + + if(WriteDebugOutput) { + const double tmp_T = EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_i[i]; + std::cout << "BEFORE [" << i << std::fixed << std::setprecision(15) << "]; theta_w: " << theta_np1_mp1[i] << " theta_i_np1_m: " << theta_i_np1_m[i] << " theta_s: " << EMS[i].VG.theta_s << std::setprecision(3) << " T: " << tmp_T << std::setprecision(8) << " rho: " << tmp_rho << " cp: " << tmp_c_p << " ColdC: " << tmp_rho * tmp_c_p * tmp_T * EMS[i].L << "\n" << std::setprecision(6); + } + + bool BS_converged=false; + double ak=0., bk=0., ck=0.; //These are values for changes in ice content. + double delta_Te_ak=0., delta_Te_bk=0., delta_Te_ck=0., delta_w_ak=0., delta_w_bk=0., delta_w_ck=0.; + double ck1=0, delta_Te_ck1=0., delta_w_ck1=0.; + + ak=-1.*theta_i_np1_m[i]; // Left starting point = all ice melts + bk=(theta_np1_mp1[i]-0.)*(Constants::density_water/Constants::density_ice); // Right starting point = all water freezes + + // Deal with special cases: + // 1) So much energy available that all ice will melt (note: this case will not be properly solved by Bisection-Secant method.) 
+ if((EMS[i].meltfreeze_tk-(EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i])) * ((tmp_c_p * tmp_rho) / ( Constants::density_ice * Constants::lh_fusion )) < -1.*theta_i_n[i] && BS_converged==false) { + ck=-1.*theta_i_np1_m[i]; + delta_w_ck=-1.*(ck*(Constants::density_ice/Constants::density_water)); + delta_Te_ck=((theta_i_np1_m[i] - theta_i_n[i]) + ck) / ((tmp_c_p * tmp_rho) / ( Constants::density_ice * Constants::lh_fusion )); //Change in element temperature associated with change in ice content + if(WriteDebugOutput) { + const double tmp_T = EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_i[i] + delta_Te_ck; + std::cout << "BS_ITER [" << BS_iter << std::scientific << "], case 2: a=" << ak << " b=" << bk << " c=" << ck << " " << delta_w_ck << " " << tmp_T << " " << EMS[i].meltfreeze_tk << ": fa: " << (delta_w_ak + ak*(Constants::density_ice/Constants::density_water)) << " fb: " << (delta_w_bk + bk*(Constants::density_ice/Constants::density_water)) << " fc: " << (delta_w_ck + ck*(Constants::density_ice/Constants::density_water)) << "\n" << std::fixed; } - // Deal with special cases: - // 1) So much energy available that all ice will melt (note: this case will not be properly solved by Bisection-Secant method.) 
- if((T_melt[i]-(EMS[SnowpackElement[i]].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i])) * ((tmp_c_p * tmp_rho) / ( Constants::density_ice * Constants::lh_fusion )) < -1.*theta_i_n[i] && BS_converged==false) { - ck=-1.*theta_i_np1_m[i]; - delta_w_ck=-1.*(ck*(Constants::density_ice/Constants::density_water)); + BS_converged=true; + } + // 2) Very small temperature difference or very small possible change in ice content + if(fabs(ak-bk)fabs(delta_w_ck1+ck1*(Constants::density_ice/Constants::density_water))) { - ck=ck1; - delta_Te_ck=delta_Te_ck1; - delta_w_ck=delta_w_ck1; - } - if(WriteOutNumerics_Level3==true) { - const double tmp_T = EMS[SnowpackElement[i]].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_i[i] + delta_Te_ck; - std::cout << "BS_ITER [" << BS_iter << std::scientific << "]: a=" << ak << " b=" << bk << " c=" << ck << " (max: " << max_delta_ice << ") " << delta_w_ck << " " << tmp_T << " " << T_melt[i] << ": fa: " << (delta_w_ak + ak*(Constants::density_ice/Constants::density_water)) << " fb: " << (delta_w_bk + bk*(Constants::density_ice/Constants::density_water)) << " fc: " << (delta_w_ck + ck*(Constants::density_ice/Constants::density_water)) << "\n" << std::fixed; - } - //Now check if convergence is achieved - if(fabs(delta_w_ck + ck*(Constants::density_ice/Constants::density_water)) < SF_epsilon) { - delta_w_ck=-1.*(ck*(Constants::density_ice/Constants::density_water)); //Make delta in water equal to ice, so we keep mass-balance. 
- BS_converged=true; - } else if(fabs(delta_w_ak + ak*(Constants::density_ice/Constants::density_water)) < SF_epsilon) { + BS_converged=true; + } + while (BS_converged==false && BS_iter < BS_MAX_ITER) { + BS_iter++; + delta_Te_ak=((theta_i_np1_m[i] - theta_i_n[i]) + ak) / ((tmp_c_p * tmp_rho) / ( Constants::density_ice * Constants::lh_fusion )); //Change in element temperature associated with change in ice content + delta_Te_bk=((theta_i_np1_m[i] - theta_i_n[i]) + bk) / ((tmp_c_p * tmp_rho) / ( Constants::density_ice * Constants::lh_fusion )); //Change in element temperature associated with change in ice content + delta_w_ak=(EMS[i].VG.fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*EMS[i].meltfreeze_tk))*std::min(0., (EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_ak)-EMS[i].meltfreeze_tk))) - theta_np1_mp1[i]; + delta_w_bk=(EMS[i].VG.fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*EMS[i].meltfreeze_tk))*std::min(0., (EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_bk)-EMS[i].meltfreeze_tk))) - theta_np1_mp1[i]; + //Now calculate bisect + ck1=(ak+bk)/2.; + delta_Te_ck1=((theta_i_np1_m[i] - theta_i_n[i]) + ck1) / ((tmp_c_p * tmp_rho) / ( Constants::density_ice * Constants::lh_fusion )); //Change in element temperature associated with change in ice content + delta_w_ck1=(EMS[i].VG.fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*EMS[i].meltfreeze_tk))*std::min(0., (EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_ck1)-EMS[i].meltfreeze_tk))) - theta_np1_mp1[i]; + //Now check secant + ck=((delta_w_bk + bk*(Constants::density_ice/Constants::density_water))*ak - (delta_w_ak + ak*(Constants::density_ice/Constants::density_water))*bk) / ((delta_w_bk + bk*(Constants::density_ice/Constants::density_water)) - (delta_w_ak + ak*(Constants::density_ice/Constants::density_water))); + delta_Te_ck=((theta_i_np1_m[i] - theta_i_n[i]) + ck) / ((tmp_c_p * tmp_rho) / ( Constants::density_ice * 
Constants::lh_fusion )); //Change in element temperature associated with change in ice content + delta_w_ck=(EMS[i].VG.fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*EMS[i].meltfreeze_tk))*std::min(0., (EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_ck)-EMS[i].meltfreeze_tk))) - theta_np1_mp1[i]; + //Now check if bisect or secant is a better approximation + const double err_ak = EMS[i].VG.fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*EMS[i].meltfreeze_tk))*((EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_ak)-EMS[i].meltfreeze_tk)) + (((theta_i_np1_m[i] - theta_i_n[i]) + ak)*(Constants::density_ice/Constants::density_water)) - EMS[i].VG.fromHtoTHETA(hw0); + const double err_bk = EMS[i].VG.fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*EMS[i].meltfreeze_tk))*((EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_bk)-EMS[i].meltfreeze_tk)) + (((theta_i_np1_m[i] - theta_i_n[i]) + bk)*(Constants::density_ice/Constants::density_water)) - EMS[i].VG.fromHtoTHETA(hw0); + const double err_ck = EMS[i].VG.fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*EMS[i].meltfreeze_tk))*((EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_ck)-EMS[i].meltfreeze_tk)) + (((theta_i_np1_m[i] - theta_i_n[i]) + ck)*(Constants::density_ice/Constants::density_water)) - EMS[i].VG.fromHtoTHETA(hw0); + const double err_ck1 = EMS[i].VG.fromHtoTHETA(hw0+(Constants::lh_fusion/(Constants::g*EMS[i].meltfreeze_tk))*((EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_ck1)-EMS[i].meltfreeze_tk)) + (((theta_i_np1_m[i] - theta_i_n[i]) + ck1)*(Constants::density_ice/Constants::density_water)) - EMS[i].VG.fromHtoTHETA(hw0); + double err_c = err_ck; + //if(fabs(err_ck) > fabs(err_ck1)) { // For now, only consider bisect. Secant calculation needs verification. 
+ ck=ck1; + delta_Te_ck=delta_Te_ck1; + delta_w_ck=delta_w_ck1; + err_c = err_ck1; + //} + if(WriteDebugOutput) { + const double tmp_T = EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_i[i] + delta_Te_ck; + std::cout << "BS_ITER [" << BS_iter << std::scientific << "]: a=" << ak << " b=" << bk << " c=" << ck << " " << delta_w_ck << " " << tmp_T << " " << EMS[i].meltfreeze_tk << ": fa: " << (delta_w_ak + ak*(Constants::density_ice/Constants::density_water)) << " fb: " << (delta_w_bk + bk*(Constants::density_ice/Constants::density_water)) << " fc: " << (delta_w_ck + ck*(Constants::density_ice/Constants::density_water)) << "\n" << std::fixed; + } + //Now check if convergence is achieved + if(fabs(err_c) < SF_epsilon) { + delta_w_ck=-1.*(ck*(Constants::density_ice/Constants::density_water)); //Make delta in water equal to ice, so we keep mass-balance. + BS_converged=true; + } else if(fabs(err_ak) < SF_epsilon) { + ck=ak; + delta_w_ck=-1.*(ck*(Constants::density_ice/Constants::density_water)); //Make delta in water equal to ice, so we keep mass-balance. + delta_Te_ck=delta_Te_ak; + BS_converged=true; + } else if(fabs(err_bk) < SF_epsilon) { + ck=bk; + delta_w_ck=-1.*(ck*(Constants::density_ice/Constants::density_water)); //Make delta in water equal to ice, so we keep mass-balance. + delta_Te_ck=delta_Te_bk; + BS_converged=true; + } else if(err_ak * err_bk > 0.) { //Multiply to check if same sign. If so, the root is outside of the search interval interval. + BS_converged=true; + if(fabs(err_ak) < fabs(err_bk)) { ck=ak; - delta_w_ck=-1.*(ck*(Constants::density_ice/Constants::density_water)); //Make delta in water equal to ice, so we keep mass-balance. delta_Te_ck=delta_Te_ak; - BS_converged=true; - } else if(fabs(delta_w_bk + bk*(Constants::density_ice/Constants::density_water)) < SF_epsilon) { + } else { ck=bk; - delta_w_ck=-1.*(ck*(Constants::density_ice/Constants::density_water)); //Make delta in water equal to ice, so we keep mass-balance. 
delta_Te_ck=delta_Te_bk; - BS_converged=true; + } + delta_w_ck=-1.*(ck*(Constants::density_ice/Constants::density_water)); //Make delta in water equal to ice, so we keep mass-balance. + } else { + //And determine whether to update the left or right point + if(err_ak * err_c > 0.) { //Multiply to check if same sign + ak=ck; } else { - //And determine whether to update the left or right point - if((delta_w_ck + ck*(Constants::density_ice/Constants::density_water)) * (delta_w_ak + ak*(Constants::density_ice/Constants::density_water)) > 0.) { //Multiply to check if same sign - ak=ck; - } else { - bk=ck; - } + bk=ck; } } - if(BS_converged==false) { - if(WriteOutNumerics_Level0==true) std::cout << "[W] ReSolver1d.cc: Bisect-Secant method failed to converge in soil freezing with dt = " << dt << ".\n"; - if(WriteOutNumerics_Level1==true) { - const double tmp_T = EMS[SnowpackElement[i]].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_i[i] + delta_Te_ck; - std::cout << " -- BS_ITER [" << BS_iter << std::scientific << "]: a=" << ak << " b=" << bk << " c=" << ck << " (max: " << max_delta_ice << ") " << delta_w_ck << " " << tmp_T << " " << T_melt[i] << ": fa: " << (delta_w_ak + ak*(Constants::density_ice/Constants::density_water)) << " fb: " << (delta_w_bk + bk*(Constants::density_ice/Constants::density_water)) << " fc: " << (delta_w_ck + ck*(Constants::density_ice/Constants::density_water)) << "\n" << std::fixed; - std::cout << " -- " << std::setprecision(15) << T_melt[i] << " " << EMS[SnowpackElement[i]].Te << " " << delta_Te_adv[i] << " " << delta_Te_adv_i[i] << " " << delta_Te[i] << " " << EMS[SnowpackElement[i]].theta[WATER] << " " << EMS[SnowpackElement[i]].theta[ICE] << "\n" << std::setprecision(6); - } - max_delta_h=2.*MAX_ALLOWED_DELTA_H; - solver_result=-1; - } else { - //Final solution - const double tmp_delta_i=ck; - const double tmp_delta_w=delta_w_ck; - const double tmp_delta_Te=delta_Te_ck; - //Apply final solution - delta_Te_i[i]=tmp_delta_Te; 
- theta_i_np1_mp1[i]=theta_i_np1_m[i]+tmp_delta_i; - theta_np1_mp1[i]+=tmp_delta_w; + } + if(BS_converged==false) { + std::cout << "[W] [" << date.toString(mio::Date::ISO) << "] ReSolver1d.cc: Bisect-Secant method failed to converge in soil freezing with dt = " << dt << ".\n"; + if(WriteDebugOutput) { + const double tmp_T = EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_i[i] + delta_Te_ck; + std::cout << " -- BS_ITER [" << BS_iter << std::scientific << "]: a=" << ak << " b=" << bk << " c=" << ck << " " << delta_w_ck << " " << tmp_T << " " << EMS[i].meltfreeze_tk << ": fa: " << (delta_w_ak + ak*(Constants::density_ice/Constants::density_water)) << " fb: " << (delta_w_bk + bk*(Constants::density_ice/Constants::density_water)) << " fc: " << (delta_w_ck + ck*(Constants::density_ice/Constants::density_water)) << "\n" << std::fixed; + std::cout << " -- " << std::setprecision(15) << EMS[i].meltfreeze_tk << " " << EMS[i].Te << " " << delta_Te_adv[i] << " " << delta_Te_adv_i[i] << " " << delta_Te[i] << " " << EMS[i].theta[WATER] << " " << EMS[i].theta[ICE] << "\n" << std::setprecision(6); } + max_delta_h=2.*MAX_ALLOWED_DELTA_H; + solver_result=-1; } else { - theta_i_np1_mp1[i]=0.; - theta_np1_mp1[i]=fromHtoTHETAforICE(h_np1_mp1[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], 0.); + //Final solution + const double tmp_delta_i=ck; + const double tmp_delta_w=delta_w_ck; + const double tmp_delta_Te=delta_Te_ck; + //Apply final solution + delta_Te_i[i]=tmp_delta_Te; + theta_i_np1_mp1[i]=theta_i_np1_m[i]+tmp_delta_i; + theta_np1_mp1[i]+=tmp_delta_w; } - //Update BS-solver statistics - bs_stats_totiter+=BS_iter; - if(BS_iter>bs_stats_maxiter) bs_stats_maxiter=BS_iter; - if(WriteOutNumerics_Level2==true) - std::cout << "AFTER [" << i << std::setprecision(15) << "]: theta_w: " << theta_np1_mp1[i] << " theta_i_np1_m: " << theta_i_np1_mp1[i] << " theta_s:" << theta_s[i] << std::setprecision(3) << " T: " << EMS[SnowpackElement[i]].Te + 
delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_i[i] << " (niter=" << BS_iter << ")\n" << std::setprecision(6); - } //END OF REPARTITIONING ICE/WATER + } else { + theta_i_np1_mp1[i]=0.; + theta_np1_mp1[i]=EMS[i].VG.fromHtoTHETAforICE(h_np1_mp1[i], 0.); + } + //Update BS-solver statistics + bs_stats_totiter+=BS_iter; + if(BS_iter>bs_stats_maxiter) bs_stats_maxiter=BS_iter; + if(WriteDebugOutput) + std::cout << "AFTER [" << i << std::setprecision(15) << "]: theta_w: " << theta_np1_mp1[i] << " theta_i_np1_m: " << theta_i_np1_mp1[i] << " theta_s:" << EMS[i].VG.theta_s << std::setprecision(3) << " T: " << EMS[i].Te + delta_Te_adv[i] + delta_Te_adv_i[i] + delta_Te[i] + delta_Te_i[i] << " (niter=" << BS_iter << ")\n" << std::setprecision(6); + } else { //END OF REPARTITIONING ICE/WATER + theta_i_np1_mp1[i]=theta_i_np1_m[i]; } - - delta_theta[i]=theta_np1_mp1[i]-theta_np1_m[i]; - delta_theta_i[i]=theta_i_np1_mp1[i]-theta_i_np1_m[i]; - } else { - // Solver failed, trigger rewind - max_delta_h=2.*MAX_ALLOWED_DELTA_H; } + delta_theta[i]=theta_np1_mp1[i]-theta_np1_m[i]; + delta_theta_i[i]=theta_i_np1_mp1[i]-theta_i_np1_m[i]; } else { - theta_np1_mp1[i]=theta_n[i]; - h_np1_mp1[i]=h_n[i]; - delta_theta[i]=0.; - delta_theta_i[i]=0.; - delta_h[memstate%nmemstates][i]=0.; + // Solver failed, trigger rewind + max_delta_h=2.*MAX_ALLOWED_DELTA_H; } //Update mass balance mass2+=(theta_np1_mp1[i]+(theta_i_np1_mp1[i]*(Constants::density_ice/Constants::density_water)))*dz[i]; - if(WriteOutNumerics_Level2==true) { + if(WriteDebugOutput) { std::cout << "ITER: " << niter << " i: " << i << std::scientific << std::setprecision(10) << " --- h1: " << h_np1_m[i] << " d_h: " << delta_h[memstate%nmemstates][i] << " h2: " << h_np1_mp1[i] << std::setprecision(12) << " --- theta1: " << theta_np1_m[i] << " d_theta: " << delta_theta[i] << " theta2: " << theta_np1_mp1[i] << "\n" << std::setprecision(6) << std::fixed; } @@ -2164,16 +1936,15 @@ void 
ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) //Note: because we want to be able to very accurately determine the flux over the snow-soil boundary (for MS_SNOW_RUNOFF) and model boundaries (for MS_SOIL_RUNOFF), //we ALWAYS have to assess the accuracy in head in this region! If we don't do this, then in case of dry soil layers, the estimated pressure head can be quite //inaccurate, leading to a completely wrong estimation of these fluxes! - if(Se[i]>convergencecriterionthreshold || i==nsoillayers_richardssolver-1 || i==nsoillayers_richardssolver || i==lowernode || i==lowernode-1) { + if(Se[i]>convergencecriterionthreshold || i==Xdata.SoilNode-1 || i==Xdata.SoilNode || i==lowernode || i==lowernode-1) { if ((i!=lowernode || aBottomBC==NEUMANN) && (i!=uppernode || aTopBC==NEUMANN)) { //First check general accuarcy: if(fabs(delta_h[memstate%nmemstates][i])>track_accuracy_h) { - track_trigger_layer_accuracy=i; track_accuracy_h=fabs(delta_h[memstate%nmemstates][i]); } //Now check against convergence criterion: - if(fabs(delta_h[memstate%nmemstates][i])>REQUIRED_ACCURACY_H) { - trigger_layer_accuracy=i; + if(fabs(delta_h[memstate%nmemstates][i]/h_np1_m[i]) > REQUIRED_ACCURACY_H) { //relative accuracy + //if(fabs(delta_h[memstate%nmemstates][i])>REQUIRED_ACCURACY_H) { //absolute accuracy accuracy=fabs(delta_h[memstate%nmemstates][i]); } } @@ -2181,17 +1952,15 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) //Absolute accuracy in theta. This doesn't behave stable, especially during saturated soil conditions, when the accuracy is set too low. 
//See Huang (1996), which proposes this, and also discusses the need for a higher accuracy: - //if(not(Se[i]>convergencecriterionthreshold || i==uppernode || i==lowernode || i==nsoillayers_richardssolver-1 || i==nsoillayers_richardssolver)) { - if(not(Se[i]>convergencecriterionthreshold || i==nsoillayers_richardssolver-1 || i==nsoillayers_richardssolver || i==lowernode || i==lowernode-1)) { + //if(not(Se[i]>convergencecriterionthreshold || i==uppernode || i==lowernode || i==Xdata.SoilNode-1 || i==Xdata.SoilNode)) { + if(!(Se[i]>convergencecriterionthreshold || i==Xdata.SoilNode-1 || i==Xdata.SoilNode || i==lowernode || i==lowernode-1)) { if ((i!=lowernode || aBottomBC==NEUMANN) && (i!=uppernode || aTopBC==NEUMANN)) { //First check general accuarcy: if(fabs(delta_theta[i]+delta_theta_i[i]*(Constants::density_ice/Constants::density_water))>track_accuracy_theta) { - track_trigger_layer_accuracy=i; track_accuracy_theta=fabs(delta_theta[i]+delta_theta_i[i]*(Constants::density_ice/Constants::density_water)); } //Now check against convergence criterion: - if (fabs(delta_theta[i]+delta_theta_i[i]*(Constants::density_ice/Constants::density_water)) > REQUIRED_ACCURACY_THETA ) { - trigger_layer_accuracy=i; + if (fabs(delta_theta[i]+delta_theta_i[i]*(Constants::density_ice/Constants::density_water)) / (EMS[i].VG.theta_s - EMS[i].VG.theta_r) > REQUIRED_ACCURACY_THETA ) { accuracy=fabs(delta_theta[i]+delta_theta_i[i]*(Constants::density_ice/Constants::density_water)); } } @@ -2215,32 +1984,48 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) if(aTopBC==NEUMANN) { //If we use Neumann, the massbalance should incorporate the applied TopFluxRate: tmp_mb_topflux=TopFluxRate*dt; } else { //Else when using Dirichlet, we should estimate the influx: (Note that basically with Dirichlet, the change of theta in the element is 0., so the influx in the model domain is equal to the flux from the upper element to the one below.) 
- tmp_mb_topflux=((theta_np1_mp1[uppernode]+theta_i_np1_mp1[uppernode]*(Constants::density_ice/Constants::density_water))-(theta_n[uppernode] + theta_i_n[uppernode]*(Constants::density_ice/Constants::density_water)))*dz[uppernode] + ((((h_np1_mp1[uppernode]-h_np1_mp1[uppernode-1])/dz_down[uppernode])+cos_sl)*k_np1_m_im12[uppernode]*dt); + tmp_mb_topflux=((theta_np1_mp1[uppernode]+theta_i_np1_mp1[uppernode]*(Constants::density_ice/Constants::density_water))-(theta_n[uppernode] + theta_i_n[uppernode]*(Constants::density_ice/Constants::density_water)))*dz[uppernode] + (2./(rho[uppernode]+rho[uppernode-1]))*((((h_np1_mp1[uppernode]*rho[uppernode]-h_np1_mp1[uppernode-1]*rho[uppernode-1])/dz_down[uppernode])+Xdata.cos_sl*rho[uppernode])*k_np1_m_im12[uppernode]*dt); } if(aBottomBC==NEUMANN) { //If we use Neumann, the massbalance should incorporate the applied BottomFluxRate: tmp_mb_bottomflux=BottomFluxRate*dt; } else { //Else when using Dirichlet, we should estimate the outflux: (Note that basically with Dirichlet, the change of theta in the element is 0., so the outflux in the model domain is equal to the flux from the element above the lowest one to the lowest one.) 
- tmp_mb_bottomflux=-1.*(((theta_np1_mp1[lowernode]+theta_i_np1_mp1[lowernode]*(Constants::density_ice/Constants::density_water))-(theta_n[lowernode] + theta_i_n[lowernode]*(Constants::density_ice/Constants::density_water)))*dz[lowernode]-((((h_np1_mp1[lowernode+1]-h_np1_mp1[lowernode])/dz_up[lowernode])+cos_sl)*k_np1_m_ip12[lowernode]*dt)); + if(uppernode > 0) { + if(variant != "SEAICE") { + tmp_mb_bottomflux=-1.*( + ((theta_np1_mp1[lowernode]+theta_i_np1_mp1[lowernode]*(Constants::density_ice/Constants::density_water))-(theta_n[lowernode] + theta_i_n[lowernode]*(Constants::density_ice/Constants::density_water)))*dz[lowernode] + -(2./(rho[lowernode+1]+rho[lowernode]))* + ((((h_np1_mp1[lowernode+1]*rho[lowernode+1]-h_np1_mp1[lowernode]*rho[lowernode])/dz_up[lowernode]) + +Xdata.cos_sl*(rho[lowernode+1]*z[lowernode+1]-rho[lowernode]*z[lowernode])/dz_up[lowernode]) + *k_np1_m_ip12[lowernode]*dt) + ); + } else { + tmp_mb_bottomflux=(Salinity.flux_down[lowernode] + Salinity.flux_down_2[lowernode]) * dt; // Units: [m] + } + } else { + //With Dirichlet lower boundary condition and only 1 element, we cannot really estimate the flux, so set it to 0. + tmp_mb_bottomflux=0.; + } } massbalanceerror+=tmp_mb_topflux; //Add topflux (note: topflux>0. means influx) massbalanceerror-=tmp_mb_bottomflux; //Substract bottomflufx (note: bottomflux>0. means outflux) massbalanceerror+=totalsourcetermflux*dt; //Add the sink/source term flux. 
- if(WriteOutNumerics_Level2==true) printf("MASSBALANCETEST: mass1 %.8E mass2 %.8E topflux %.8E (%.8E) bottomflux %.8E (%.8E) sourceflux %.8E delta %.8E\n", mass1, mass2, tmp_mb_topflux, ((theta_np1_mp1[uppernode]+theta_i_np1_mp1[uppernode]*(Constants::density_ice/Constants::density_water))-(theta_n[uppernode] + theta_i_n[uppernode]*(Constants::density_ice/Constants::density_water)))*dz[uppernode] + ((((h_np1_m[uppernode]-h_np1_m[uppernode-1])/dz_down[uppernode])+1.)*k_np1_m_im12[uppernode]*dt), tmp_mb_bottomflux, -1.*(((theta_np1_mp1[lowernode]+theta_i_np1_mp1[lowernode]*(Constants::density_ice/Constants::density_water))-(theta_n[lowernode] + theta_i_n[lowernode]*(Constants::density_ice/Constants::density_water)))*dz[lowernode]-((((h_np1_m[lowernode+1]-h_np1_m[lowernode])/dz_up[lowernode])+cos_sl)*k_np1_m_ip12[lowernode]*dt)), totalsourcetermflux*dt, massbalanceerror); + + if(WriteDebugOutput) printf("MASSBALANCETEST: mass1 %.8E mass2 %.8E topflux %.8E (%.8E) bottomflux %.8E (%.8E) sourceflux %.8E delta %.8E\n", mass1, mass2, tmp_mb_topflux, ((theta_np1_mp1[uppernode]+theta_i_np1_mp1[uppernode]*(Constants::density_ice/Constants::density_water))-(theta_n[uppernode] + theta_i_n[uppernode]*(Constants::density_ice/Constants::density_water)))*dz[uppernode] + ((((h_np1_m[uppernode]-h_np1_m[uppernode-1])/dz_down[uppernode])+1.)*k_np1_m_im12[uppernode]*dt), tmp_mb_bottomflux, -1.*(((theta_np1_mp1[lowernode]+theta_i_np1_mp1[lowernode]*(Constants::density_ice/Constants::density_water))-(theta_n[lowernode] + theta_i_n[lowernode]*(Constants::density_ice/Constants::density_water)))*dz[lowernode]-(1./rho[lowernode])*((((h_np1_m[lowernode+1]*rho[lowernode+1]-h_np1_m[lowernode]*rho[lowernode])/dz_up[lowernode])+Xdata.cos_sl*rho[lowernode])*k_np1_m_ip12[lowernode]*dt)), totalsourcetermflux*dt, massbalanceerror); //Make sure to trigger a rewind by making max_delta_h very large in case the mass balance is violated or change in head are too large. 
if(fabs(massbalanceerror)>1E-1 || max_delta_h>MAX_ALLOWED_DELTA_H) { max_delta_h=2.*MAX_ALLOWED_DELTA_H; } - if (accuracy > 1E-20 || fabs(massbalanceerror)>maxallowedmassbalanceerror) { //Check whether we converged. Note that accuracy is only assigned when the layer exceeds the prescribed required accuracy. This is because we want to have more than one convergence criterion (both h and theta based), we say accuracy=0 is a sign of convergence in the whole domain. + if (accuracy > 1E-20 || fabs(massbalanceerror)>maxallowedmassbalanceerror || solver_result==-1) { //Check whether we converged. Note that accuracy is only assigned when the layer exceeds the prescribed required accuracy. This is because we want to have more than one convergence criterion (both h and theta based), we say accuracy=0 is a sign of convergence in the whole domain. boolConvergence=false; } - if(WriteOutNumerics_Level1==true) printf("CONVERGENCE: layer: %d/%d --- acc_h: %.10f acc_theta: %.10f acc: %.10f def_norm: %f converged? %s\n", track_trigger_layer_accuracy, trigger_layer_accuracy, track_accuracy_h, track_accuracy_theta, accuracy, deficit_vector_norm, (boolConvergence)?"yes":"no"); + if(WriteDebugOutput) printf("CONVERGENCE: acc_h: %.10f acc_theta: %.10f acc: %.10f converged? %s\n", track_accuracy_h, track_accuracy_theta, accuracy, (boolConvergence)?"yes":"no"); //Copy solution, to prepare for next iteration - for (i = uppernode; i >= lowernode; i--) { + for (i = lowernode; i <= uppernode; i++) { h_np1_m[i]=h_np1_mp1[i]; theta_np1_m[i]=theta_np1_mp1[i]; theta_i_np1_m[i]=theta_i_np1_mp1[i]; @@ -2249,7 +2034,7 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) //Rewind time step control: retry finding solution with smaller time step when MAX_ITER is exceeded. Also when max_delta_h is too large we do a rewind, else the model is starting to blow up. 
if((niter>MAX_ITER || max_delta_h>MAX_ALLOWED_DELTA_H) && dt > MIN_VAL_TIMESTEP && boolConvergence==false) { - if(WriteOutNumerics_Level1==true) std::cout << "REWIND: timestep " << std::setprecision(20) << dt << std::setprecision(6) << " ---> "; + if(WriteDebugOutput) std::cout << "REWIND: timestep " << std::setprecision(20) << dt << std::setprecision(6) << " ---> "; niter_seqrewinds++; //We increase the sequential rewinds counter. For this first rewind, this will give a power of 1 for the new time step, etc. //When we find a good solution again, we reset this counter to 0. @@ -2258,7 +2043,7 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) dt*=pow(0.333, double(niter_seqrewinds)); //Now make the time step smaller, we use niter_seqrewinds to speed up when we encounter multiple consecutive rewinds. //The value of 0.333 is taken from the HYDRUS-manual, where they do this in case a rewind is necessary. - if(WriteOutNumerics_Level1==true) std::cout << std::setprecision(20) << dt << " (trigger layer: " << trigger_layer_blowup << " accuracy: " << std::setprecision(10) << accuracy << " max_delta_h: " << max_delta_h << ")\n" << std::setprecision(6); + if(WriteDebugOutput) std::cout << std::setprecision(20) << dt << " accuracy: " << std::setprecision(10) << accuracy << " max_delta_h: " << max_delta_h << ")\n" << std::setprecision(6); niter=0; //Because of the rewind, we start again with the iterations. niter_nrewinds++; //Increase rewind counter @@ -2266,7 +2051,7 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) boolConvergence=false; //Of course, when we need to rewind, we have had no convergence. DoRewindFlag=true; //Set DoRewindFlag to true, to quit the iteration loop. StopLoop=false; //In case StopLoop was set true (last time step), we set it back to false. It might be that the smaller time step won't match the SNOWPACK time step any longer. 
- for (i = uppernode; i >= lowernode; i--) { //We have to reset the whole domain, because we do the time step for the whole domain. + for (i = lowernode; i <= uppernode; i++) { //We have to reset the whole domain, because we do the time step for the whole domain. h_np1_m[i]=h_n[i]; theta_np1_m[i]=theta_n[i]; theta_i_np1_m[i]=theta_i_n[i]; //Set back ice content due to soil freezing/thawing @@ -2279,36 +2064,35 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) //Print latest state for debugging: bool DoThrow=false; if(SafeMode==false) { - prn_msg(__FILE__, __LINE__, "err", Date(), "Richards-Equation solver did not converge: reached maximum number of iterations (500), with a time step: %G\n", dt); + prn_msg(__FILE__, __LINE__, "err", date, "Richards-Equation solver did not converge for %s: reached maximum number of iterations (500), with a time step: %G\n", Xdata.meta.stationID.c_str(), dt); DoThrow=true; } else { if(seq_safemode>3) { - std::cout << "[E] ERROR in Richards-Equation solver: no convergence! SafeMode was not able to continue simulation!\n"; + std::cout << "[E] [" << date.toString(mio::Date::ISO) << "] ERROR in Richards-Equation solver: no convergence! SafeMode was not able to continue simulation!\n"; DoThrow=true; } else { - std::cout << "[W] WARNING in Richards-Equation solver: no convergence! SafeMode was used to continue simulation! [" << seq_safemode << "].\n"; + std::cout << "[W] [" << date.toString(mio::Date::ISO) << "] WARNING in Richards-Equation solver: no convergence! SafeMode was used to continue simulation! [" << seq_safemode << ", stn=" << Xdata.meta.stationID << "].\n"; } } - std::cout << " POSSIBLE SOLUTIONS:\n =============================================================================\n"; - if(snowpack_dt>900) std::cout << " - SNOWPACK time step is larger than 15 minutes. 
This numerical problem\n may be resolved by using a time step of 15 minutes.\n"; + std::cout << " POSSIBLE SOLUTIONS:\n ========================================================================================\n"; + if(sn_dt>900) std::cout << " - SNOWPACK time step is larger than 15 minutes. This numerical problem\n may be resolved by using a time step of 15 minutes.\n"; #ifndef CLAPACK std::cout << " - SNOWPACK was not compiled with BLAS and CLAPACK libraries.\n Try installing libraries BLAS and CLAPACK and use solver TGSV (default).\n"; #endif std::cout << " - Verify that the soil is not initialized in a very dry or a very\n wet state.\n"; - if(BottomBC!=FREEDRAINAGE) std::cout << " - If the soil is saturated, try setting LB_COND_WATERFLUX = FREEDRAINAGE\n in the [SnowpackAdvanced] section of the ini-file.\n"; - if(BottomBC!=WATERTABLE) std::cout << " - If the soil is dry, try setting LB_COND_WATERFLUX = WATERTABLE in the\n [SnowpackAdvanced] section of the ini-file.\n"; + std::cout << " - If the snow and/or soil saturates, try setting LB_COND_WATERFLUX = FREEDRAINAGE\n in the [SnowpackAdvanced] section of the ini-file.\n"; + std::cout << " - If the soil is drying out, try setting LB_COND_WATERFLUX = WATERTABLE in the\n [SnowpackAdvanced] section of the ini-file.\n"; std::cout << " - Try bucket scheme, by setting WATERTRANSPORTMODEL_SNOW = BUCKET and\n WATERTRANSPORTMODEL_SOIL = BUCKET in the [SnowpackAdvanced] section\n of the ini-file.\n"; - std::cout << " - When using Canopy module, there is a known issue with transpiration.\n Please see issue 471 (http://models.slf.ch/p/snowpack/issues/471/).\n"; - std::cout << "\n -----------------------------------------------------------------------------\n SOLVER DUMP:\n"; - for (i = uppernode; i >= lowernode; i--) { - printf(" layer [%d]: h(t): %.3f h(t+dt): %.3f th(t): %.3f (%.3f-%.3f) th(t+dt): %.3f th_ice(t): %.3f th_ice(t+dt): %.3f (vg_params: %.2f %.2f %.2f)\n", i, h_n[i], h_np1_m[i], theta_n[i], theta_r[i], 
theta_s[i], theta_np1_m[i], (i= lowernode; i--) { //We have to reset the whole domain, because we do the time step for the whole domain. + for (i = lowernode; i <= uppernode; i++) { //We have to reset the whole domain, because we do the time step for the whole domain. // The first time a safe mode is required, set source/sink terms to 0. if(s[i]!=0) { SafeMode_MBE+=s[i]*(sn_dt-TimeAdvance)*Constants::density_water*dz[i]; - printf(" --> reset source/sink term at layer %d from %G ", i, s[i]); + printf(" --> reset source/sink term at layer %d from %G ", int(i), s[i]); s[i]=0.; totalsourcetermflux=0.; printf(" to %G.\n", s[i]); @@ -2338,22 +2121,23 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) } } else { if(seq_safemode==3) { - for (i = uppernode; i >= lowernode; i--) { //We have to reset the whole domain, because we do the time step for the whole domain. + mass1=0.; //Because we will fiddle around with theta, we should update mass1 (mass at beginning of time step) + for (i = lowernode; i <= uppernode; i++) { //We have to reset the whole domain, because we do the time step for the whole domain. 
// Update the SafeMode mass balance error tracking variable by "removing" all water SafeMode_MBE-=(theta_n[i]+theta_i_n[i])*dz[i]*Constants::density_water; // Make sure pressure head is in secure limits: - h_n[i]=std::max(h_d, std::min(h_e[i], h_n[i])); - theta_n[i]=fromHtoTHETAforICE(h_n[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], theta_i_n[i]); + h_n[i]=std::max(h_d, std::min(EMS[i].VG.h_e, h_n[i])); + theta_n[i]=EMS[i].VG.fromHtoTHETAforICE(h_n[i], theta_i_n[i]); //Deal with dry layers - if(theta_n[i]+theta_i_n[i] < theta_r[i]+(REQUIRED_ACCURACY_THETA/1000.)) { - theta_n[i]=theta_r[i]+(REQUIRED_ACCURACY_THETA/1000.); - h_n[i]=fromTHETAtoHforICE(theta_n[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], h_d, theta_i_n[i]); + if(theta_n[i]+theta_i_n[i] < EMS[i].VG.theta_r+(REQUIRED_ACCURACY_THETA/1000.)) { + theta_n[i]=EMS[i].VG.theta_r+(REQUIRED_ACCURACY_THETA/1000.); + h_n[i]=EMS[i].VG.fromTHETAtoHforICE(theta_n[i], h_d, theta_i_n[i]); } //Deal with wet layers - if(theta_n[i]+theta_i_n[i] > theta_s[i]-(REQUIRED_ACCURACY_THETA/1000.)) { + if(theta_n[i]+theta_i_n[i] > EMS[i].VG.theta_s-(REQUIRED_ACCURACY_THETA/1000.)) { theta_i_n[i]*=0.90; - theta_n[i]=((theta_n[i]-theta_r[i])*0.9)+theta_r[i]; - h_n[i]=fromTHETAtoHforICE(theta_n[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], h_d, theta_i_n[i]); + theta_n[i]=((theta_n[i]-EMS[i].VG.theta_r)*0.9)+EMS[i].VG.theta_r; + h_n[i]=EMS[i].VG.fromTHETAtoHforICE(theta_n[i], h_d, theta_i_n[i]); } // Update the SafeMode mass balance error tracking variable by "adding" the water again SafeMode_MBE+=(theta_n[i]+theta_i_n[i])*dz[i]*Constants::density_water; @@ -2364,12 +2148,12 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) delta_Te_i[i]=0.; //Reset temperature change due to soil freezing/thawing delta_Te_adv_i[i]=0.; //Reset temperature change due to heat advection by water flowing - + //Now update mass1, which may have changed due to 
SafeMode throwing some water away, or introducing some water: mass1+=(theta_n[i]+(theta_i_n[i]*(Constants::density_ice/Constants::density_water)))*dz[i]; } std::cout << " --> reset dry and wet layers.\n"; - + //Deal with the TopFluxRate: if(surfacefluxrate!=0.) { SafeMode_MBE+=(surfacefluxrate/2.)*(sn_dt-TimeAdvance)*Constants::density_water; @@ -2381,8 +2165,12 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) //Deal with the TopFluxRate: SafeMode_MBE+=(surfacefluxrate/2.)*(sn_dt-TimeAdvance)*Constants::density_water; printf(" --> set surfacefluxrate from %G ", surfacefluxrate); - surfacefluxrate/=2.; - printf("to %G.\n", surfacefluxrate); + if(seq_safemode==2) { + surfacefluxrate=0.; + } else { + surfacefluxrate/=2.; + } + printf("to %G.\n", surfacefluxrate); } } std::cout << " Estimated mass balance error due to SafeMode: " << std::scientific << SafeMode_MBE << std::fixed << " kg/m^2\n"; @@ -2399,14 +2187,89 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) stats_nsteps++; //Update the statistics of the number of steps. //Prepare for next time step: - for (i = uppernode; i >= lowernode; i--) { //Cycle through all Richards solver domain layers. 
+ if (Xdata.Seaice != NULL) { + // + // For sea ice, deal with salinity flux + // + + // Set the SalinityTransport vector with the solution after liquid water flow + std::vector DeltaSal(nE, 0.); //Salinity changes + std::vector DeltaSal2(nE, 0.); //Salinity changes + for (i = lowernode; i <= uppernode; i++) { //We loop over all Richards solver domain layers + Salinity.BrineSal[i] = EMS[i].salinity / theta_n[i]; //Calculate brine salinity + Salinity.theta1[i] = theta_n[i]; + Salinity.theta2[i] = theta_np1_mp1[i]; + } + Salinity.BottomSalinity = (Xdata.Seaice->OceanSalinity); + Salinity.TopSalinity = (0.); + + // Solve the transport equation + if(SalinityTransportSolver==SalinityTransport::EXPLICIT) { + Salinity.SolveSalinityTransportEquationExplicit(dt, DeltaSal); + } else { + Salinity.SolveSalinityTransportEquationImplicit(dt, DeltaSal, 0.5, (SalinityTransportSolver==SalinityTransport::IMPLICIT2)); + } + + // Apply and verify solution + const double tol = 0.; + for (i = lowernode; i <= uppernode; i++) { + //EMS[i].salinity = Salinity.BrineSal[i] * theta_np1_mp1[i]; + //EMS[i].salinity += DeltaSal[i] * (theta_np1_mp1[i]); + + //Verify new salinity profile + if(EMS[i].salinity < 0. && TimeAdvance > 900. 
- Constants::eps2) { + EMS[i].salinity = tol; + } + Xdata.Edata[i].meltfreeze_tk = Xdata.Seaice->calculateMeltingTemperature(Salinity.BrineSal[i]); + } + + if (SALINITY_MIXING != NONE) { + for (i = lowernode; i <= uppernode; i++) { //We loop over all Richards solver domain layers + DeltaSal2[i] = DeltaSal[i]; + DeltaSal[i] = 0.; + Salinity.BrineSal[i] = EMS[i].salinity / theta_n[i]; + Salinity.flux_up[i] = Salinity.flux_up_2[i]; + Salinity.flux_down[i] = Salinity.flux_down_2[i]; + Salinity.flux_up_2[i] = Salinity.flux_down_2[i] = 0.; + //Salinity.D[i] = 0.; + } + // Solve the transport equation + if(SalinityTransportSolver==SalinityTransport::EXPLICIT) { + Salinity.SolveSalinityTransportEquationExplicit(dt, DeltaSal2); + } else { + Salinity.SolveSalinityTransportEquationImplicit(dt, DeltaSal2, 0.5, SalinityTransportSolver==SalinityTransport::IMPLICIT2); + } + } + + // Apply and verify solution + for (i = lowernode; i <= uppernode; i++) { + EMS[i].salinity = Salinity.BrineSal[i] * theta_np1_mp1[i]; + //EMS[i].salinity = (((theta_n[i]!=0.) ? (EMS[i].salinity / theta_n[i]) : (0.)) + (DeltaSal[i] + DeltaSal2[i])) * (theta_np1_mp1[i]); + Salinity.sb[i] = 0.; + //Verify new salinity profile + if(EMS[i].salinity < 0. && TimeAdvance > 900. - Constants::eps2) { + EMS[i].salinity = tol; + } + EMS[i].meltfreeze_tk = Xdata.Seaice->calculateMeltingTemperature(Salinity.BrineSal[i]); + EMS[i].updDensity(); + EMS[i].M=EMS[i].L*EMS[i].Rho; + } + + + + Xdata.Seaice->BottomSalFlux += Salinity.BottomSalFlux; + Xdata.Seaice->TopSalFlux += Salinity.TopSalFlux; + Salinity.BottomSalFlux = 0.; + Salinity.TopSalFlux = 0.; + } + + for (i = lowernode; i <= uppernode; i++) { //Cycle through all Richards solver domain layers. //Apply change in temperature due to soil freezing or thawing and heat advection by flowing water: - if(SnowpackElement[i] 0.) 
{ //Check if phase change did occur in soil delta_Te[i]+=delta_Te_i[i]; - EMS[SnowpackElement[i]].QIntmf+=(Constants::density_ice*(theta_i_np1_mp1[i]-theta_i_n[i])*(Constants::specific_heat_water-Constants::specific_heat_ice)*(T_melt[i]-Constants::melting_tk))/dt; - EMS[SnowpackElement[i]].melting_tk=EMS[SnowpackElement[i]].freezing_tk=T_melt[i]; + EMS[i].QIntmf+=(Constants::density_ice*(theta_i_np1_mp1[i]-theta_i_n[i])*(Constants::specific_heat_water-Constants::specific_heat_ice)*(EMS[i].meltfreeze_tk-Constants::meltfreeze_tk))/dt; // Now that we have performed a phase change, we should correct the nodal temperatures too. This will be done later in PhaseChange, // by using Qmf to determine amount of phase change that occurred. } @@ -2419,11 +2282,7 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) delta_Te_adv_i[i]=0.; //Set initial solution for next iteration - if(activelayer[i]==true) { - theta_np1_mp1[i]=fromHtoTHETAforICE(h_np1_m[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], theta_i_np1_mp1[i]); - } else { - theta_np1_mp1[i]=theta_n[i]; - } + theta_np1_mp1[i]=EMS[i].VG.fromHtoTHETAforICE(h_np1_m[i], theta_i_np1_mp1[i]); delta_h_dt[i]=(h_np1_mp1[i]-h_n[i])/dt; //We make delta_h_dt relative to time step. If time step is allowed to change, we can use this delta_h_dt (actually a derivative) to better estimate solution in next time step. delta_theta_dt[i]=(theta_np1_mp1[i]-theta_n[i])/dt; //We make delta_theta_dt relative to time step. If time step is allowed to change, we can use this delta_h_dt (actually a derivative) to better estimate solution in next time step. @@ -2436,43 +2295,70 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) } - //Determine (estimate) flux across boundaries (downward ==> positive flux): //This is an additional check for the boundaries. 
- actualtopfluxcheck+=((delta_theta_dt[uppernode]*dt)*dz[uppernode])+(((h_n[uppernode]-h_n[uppernode-1])/dz_down[uppernode])+cos_sl)*k_np1_m_im12[uppernode]*dt; actualtopflux+=TopFluxRate*dt; + Xdata.Ndata[nN-1].water_flux += TopFluxRate*dt; refusedtopflux+=(surfacefluxrate-TopFluxRate)*dt; if(aBottomBC==DIRICHLET) { - actualbottomflux+=-1.*((delta_theta_dt[lowernode]+(delta_theta_i_dt[lowernode]*(Constants::density_ice/Constants::density_water))*dt)-((((h_np1_mp1[lowernode+1]-h_np1_mp1[lowernode])/dz_up[lowernode])+cos_sl)*k_np1_m_ip12[lowernode]*dt)); + if(uppernode > 0) { + double tmp_flux = 0.; + if(variant != "SEAICE") { + tmp_flux=-1.*((delta_theta_dt[lowernode]+(delta_theta_i_dt[lowernode]*(Constants::density_ice/Constants::density_water))*dt*dz_[lowernode]) - (1./rho[lowernode])*((((h_np1_mp1[lowernode+1]*rho[lowernode+1]-h_np1_mp1[lowernode]*rho[lowernode])/dz_up[lowernode])+Xdata.cos_sl*rho[lowernode])*k_np1_m_ip12[lowernode]*dt)); + } else { + tmp_flux=(Salinity.flux_down[0]+Salinity.flux_down_2[0])*dt; + } + actualbottomflux+=tmp_flux; + Xdata.Ndata[0].water_flux += tmp_flux*Constants::density_water; + } else { + //With Dirichlet lower boundary condition and only 1 element, we cannot really estimate the flux, so set it to 0. + const double tmp_flux=0.; + actualbottomflux+=tmp_flux; + } } else { actualbottomflux+=BottomFluxRate*dt; + Xdata.Ndata[0].water_flux += BottomFluxRate*dt*Constants::density_water; + } + if(nN > 2) { // Note: top and bottom node have been filled above, so only execute loop when 3 or more nodes are present. 
+ for(size_t node_i=1; node_i < nN-1; node_i++) { + Xdata.Ndata[node_i].water_flux += (1./rho[node_i])*((((h_n[node_i]*rho[node_i]-h_n[node_i-1]*rho[node_i-1])/dz_up[node_i-1])+Xdata.cos_sl*rho[node_i])*k_np1_m_ip12[node_i-1]*dt)*Constants::density_water; + } } - - massbalanceerror_sum+=massbalanceerror; - if(WriteOutNumerics_Level1==true) printf("MASSBALANCE: mass1 %.8f mass2 %.8f delta %.8f\n", mass1, mass2, massbalanceerror); - //Determine flux at soil snow interface (note: postive=flux upward, negative=flux downward): - if (int(nsoillayers_snowpack) 0 means influx! - snowsoilinterfaceflux_after=((((h_n[nsoillayers_richardssolver]-h_n[nsoillayers_richardssolver-1])/dz_up[nsoillayers_richardssolver-1])+cos_sl)*k_np1_m_ip12[nsoillayers_richardssolver-1]*dt); - snowsoilinterfaceflux1+=snowsoilinterfaceflux_after; - // Other method to estimate soil snow interface flux (based on average before and end of time step). - snowsoilinterfaceflux2+=0.5*(snowsoilinterfaceflux_before+snowsoilinterfaceflux_after); + if(Xdata.SoilNode>0) { + const double tmp_snowsoilinterfaceflux=(1./rho[Xdata.SoilNode])*((((h_n[Xdata.SoilNode]*rho[Xdata.SoilNode]-h_n[Xdata.SoilNode-1]*rho[Xdata.SoilNode-1])/dz_up[Xdata.SoilNode-1])+Xdata.cos_sl*rho[Xdata.SoilNode])*k_np1_m_ip12[Xdata.SoilNode-1]*dt); + snowsoilinterfaceflux+=tmp_snowsoilinterfaceflux; + } else { + snowsoilinterfaceflux=actualbottomflux; + } } } else { //Make the commented lines active if you whish to add the TopFluxRate to the snowsoilinterfaceflux even when no snow is present. - //snowsoilinterfaceflux1+=TopFluxRate*dt; - //snowsoilinterfaceflux2+=TopFluxRate*dt; + //snowsoilinterfaceflux+=TopFluxRate*dt; + } + + + //Determine slope parallel flux + const double tmp_sin_sl = sqrt(1. - Xdata.cos_sl * Xdata.cos_sl); //Calculate sin of slope, from cos_sl + for (i = lowernode; i <= uppernode; i++) { //Cycle through all Richards solver domain layers. 
+ EMS[i].SlopeParFlux += tmp_sin_sl*K[i]*dt; + } + + + massbalanceerror_sum+=massbalanceerror; + if(WriteDebugOutput) { + printf("MASSBALANCE: mass1 %.8f mass2 %.8f delta %.8f\n", mass1, mass2, massbalanceerror); + printf("CONTROL: %.15f %.15f %.15f %.15f %.15f %.15f %f\n", surfacefluxrate, TopFluxRate, actualtopflux, BottomFluxRate, actualbottomflux, snowsoilinterfaceflux, dt); } - if(WriteOutNumerics_Level2==true) printf("CONTROL: %.15f %.15f %.15f %.15f %.15f %.15f %.15f %.15f %f\n", surfacefluxrate, TopFluxRate, actualtopflux, actualtopfluxcheck, BottomFluxRate, actualbottomflux, snowsoilinterfaceflux1, snowsoilinterfaceflux2, dt); //Time step control //This time step control increases the time step when niter is below a certain value. When rewinds occurred in the time step, no change is done (dt already adapted by the rewind-mechanim), if too many iterations, time step is decreased. @@ -2488,288 +2374,338 @@ void ReSolver1d::SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata) if ( dt < MIN_VAL_TIMESTEP) dt=MIN_VAL_TIMESTEP; if ( dt > MAX_VAL_TIMESTEP) dt=MAX_VAL_TIMESTEP; - //Special limit in case of snow: - if(int(nsoillayers_snowpack)MAX_VAL_TIMESTEP_FOR_SNOW) { - dt=MAX_VAL_TIMESTEP_FOR_SNOW; - } - //Time step statistics if(stats_min_dt>dt) stats_min_dt=dt; if(stats_max_dt new dt: %.15f (Problematic layer: %d of %d)\n", nsteps, TimeAdvance, niter, niter_nrewinds, actualtopflux, dt, track_trigger_layer_accuracy, uppernode); + if(WriteDebugOutput) printf("NSTEPS: %d, TIME ADVANCE: %f, ITERS NEEDED: %d [%d], ACTUALTOPFLUX: %.10f ---> new dt: %.15f\n", nsteps, TimeAdvance, niter, niter_nrewinds, actualtopflux, dt); } //END DoRewindFlag==false } while(StopLoop==false); //This is the main loop to perform 1 SNOWPACK time step - //Because the Richards solver domain and the snowpack domain does not necessarily match (Richards solver domain can have more layers), we do the following trick: - //1) We first empty all the layers in the snowpack domain - for 
(i = toplayer-1; i >= 0; i--) { //We loop over all SNOWPACK layers ... - EMS[i].theta[WATER]=0.; //... and set water content to 0 - EMS[i].theta_r=0.; // and set residual water content to 0 + // Copy results back to SNOWPACK + for (i = lowernode; i <= uppernode; i++) { //We loop over all Richards solver domain layers if(EMS[i].theta[SOIL]>Constants::eps2) { //We are in soil - EMS[i].theta[ICE]=0.; //... set ice content to 0. + EMS[i].theta[WATER]=EMS[i].VG.fromHtoTHETAforICE(h_n[i], theta_i_n[i]); + EMS[i].theta[ICE]=theta_i_n[i]; + } else { //We are in snow + EMS[i].theta[WATERINDEX]=EMS[i].VG.fromHtoTHETAforICE(h_n[i], theta_i_n[i]); } + EMS[i].h=h_n[i]; + + //And adjust all the properties accordingly + EMS[i].theta[AIR]=1.-EMS[i].theta[WATER]-EMS[i].theta[WATER_PREF]-EMS[i].theta[ICE]-EMS[i].theta[SOIL]; + //Now we have checked everything, we make it fit between [0, 1]: to get rid off all round-off errors + EMS[i].theta[AIR]=std::max(0., std::min(1., EMS[i].theta[AIR])); + EMS[i].theta[WATER]=std::max(0., std::min(1., EMS[i].theta[WATER])); + EMS[i].theta[WATER_PREF]=std::max(0., std::min(1., EMS[i].theta[WATER_PREF])); + EMS[i].theta[ICE]=std::max(0., std::min(1., EMS[i].theta[ICE])); + EMS[i].updDensity(); + EMS[i].M=EMS[i].L*EMS[i].Rho; + EMS[i].heatCapacity(); + + //Every change in ice content in a specific layer must be associated with phase changes. Store the associated energy accordingly. + EMS[i].Qmf += ((EMS[i].theta[ICE]-snowpackBACKUPTHETAICE[i]) * Constants::density_ice * Constants::lh_fusion) / sn_dt; // Units: [W m-3] + //We transferred the temperature change of the element due to soil freezing/thawing in Qmf, so reset delta_Te: + delta_Te[i]=0.; } - //2) Now we fill them, scaling with the Richards solver domain layer heights. The "dictionary" makes that all the Richards solver domain layers belonging to a single SNOWPACK layer are summed together. 
- for (i = uppernode; i >= lowernode; i--) { //We loop over all Richards solver domain layers - if(EMS[SnowpackElement[i]].theta[SOIL]>Constants::eps2) { //We are in soil - if(activelayer[i]==true) { //If we have more water than theta_r, we copy the end state from the solver - //EMS[SnowpackElement[i]].theta[WATER]+=dz[i]*fromHtoTHETA(h_n[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], theta_d[i]); - EMS[SnowpackElement[i]].theta[WATER]+=dz[i]*fromHtoTHETAforICE(h_n[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], theta_i_n[i]); - EMS[i].theta[ICE]+=dz[i]*theta_i_n[i]; - /////EMS[i].theta[ICE]+=dz[i]*theta_i_n[i]*(Constants::density_water/Constants::density_ice); - } else { //We are in "dry" conditions, and we just copy the initial state. - EMS[SnowpackElement[i]].theta[WATER]+=dz[i]*theta_n[i]; - } - } else { //We are in snow - if(activelayer[i]==true) { - //EMS[SnowpackElement[i]].theta[WATER]+=dz[i]*fromHtoTHETA(h_n[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], theta_d[i]); - EMS[SnowpackElement[i]].theta[WATER]+=dz[i]*fromHtoTHETAforICE(h_n[i], theta_r[i], theta_s[i], alpha[i], m[i], n[i], Sc[i], h_e[i], theta_i_n[i]); - } else { - EMS[SnowpackElement[i]].theta[WATER]+=dz[i]*theta_n[i]; + + //double max_flux=0.; + if(enable_pref_flow) { + if(matrix==true) { + // We calculate the pref_flow area now + for (i = lowernode; i <= uppernode; i++) { + // These commented lines may become useful for a criterion where the "system influx rate" is used, as is typical for preferential flow area + //const double flux_compare = (i==0) ? (0.) : ( + //(i==uppernode)?(surfacefluxrate):(((((h_n[i]-h_n[i-1])/dz_up[i-1])+cos_sl)*sqrt(K[i]*K[i-1])*dt)) + //); + //if(max_flux=Xdata.SoilNode) { //For snow only + // Volumetric area: + //vol_area = exp(0.09904-3.557*(EMS[i].ogs)); // As presented at EGU 2016. 
+ const double vol_area = 0.0584 * pow((0.5*EMS[i].ogs), -1.1090277); + + const double area = std::max(0.01, std::min(0.90, vol_area)); + + // Max is to ensure the pref flow area doesn't decrease below saturation of the pref flow path. + EMS[i].PrefFlowArea = std::min(0.999*(1.-(EMS[i].theta[WATER]/((1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)))), std::max(1.001*(EMS[i].theta[WATER_PREF]/((1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water))), area)); + } else { + EMS[i].PrefFlowArea = 0.; + } } } - EMS[SnowpackElement[i]].theta_r+=dz[i]*theta_r[i]; - //EMS[SnowpackElement[i]].head+=dz[i]*h_n[i]; + } else { + for (i = lowernode; i <= uppernode; i++) { + EMS[i].PrefFlowArea = 0.; + } } - //3) Now scale them with the SNOWPACK domain layer heights and adjust properties accordingly - const double MIN_VAL_THETA_SNOWPACK=0.0; //Minimum water content that is allowed to be passed on to the rest of SNOWPACK. When theta is below this value, it is truncated to 0, before leaving the Richards solver. - for (i = toplayer-1; i >= 0; i--) { //We loop over all SNOWPACK layers - //Do the actual scaling - if ((i>nsoillayers_snowpack-1 && EMS[i].theta[WATER]/EMS[i].L>MIN_VAL_THETA_SNOWPACK) || (i<=nsoillayers_snowpack-1)) { //If we are in snow and have enough water to pass the water on to the rest of SNOWPACK, OR if we are in soil: - EMS[i].theta[WATER]/=EMS[i].L; //Scale each layer with SNOWPACK layer height - if(EMS[i].theta[SOIL]>Constants::eps2) { //We are in soil ... - EMS[i].theta[ICE]/=EMS[i].L; //... scale ice content. 
- } - EMS[i].theta_r/=EMS[i].L; //Scale each layer with SNOWPACK layer height - //EMS[i].head/=EMS[i].L; //Scale each layer with SNOWPACK layer height - - //In case we had to melt ice to get theta_r, we have to adjust the temperature: - EMS[i].Te -= dT[i]; - if(i==int(nE)-1 && i>=0) { //HACK, TODO: remove type inconstency in comparison - NDS[i+1].T-=dT[i]; - NDS[i].T-=dT[i]; - } - //And adjust all the properties accordingly - EMS[i].theta[AIR]=1.-EMS[i].theta[WATER]-EMS[i].theta[ICE]-EMS[i].theta[SOIL]; - //Now we have checked everything, we make it fit between [0, 1]: to get rid off all round-off errors - EMS[i].theta[AIR]=std::max(0., std::min(1., EMS[i].theta[AIR])); - EMS[i].theta[WATER]=std::max(0., std::min(1., EMS[i].theta[WATER])); - EMS[i].theta[ICE]=std::max(0., std::min(1., EMS[i].theta[ICE])); - EMS[i].Rho = (EMS[i].theta[ICE] * Constants::density_ice) + (EMS[i].theta[WATER] * Constants::density_water) + (EMS[i].theta[SOIL] * EMS[i].soil[SOIL_RHO]); - EMS[i].M=EMS[i].L*EMS[i].Rho; - EMS[i].heatCapacity(); - - //Every change in ice content in a specific layer must be associated with phase changes. Store the associated energy accordingly. - EMS[i].Qmf += ((EMS[i].theta[ICE]-snowpackBACKUPTHETAICE[i]) * Constants::density_ice * Constants::lh_fusion) / snowpack_dt; // Units: [W m-3] - //We transferred the temperature change of the element due to soil freezing/thawing in Qmf, so reset delta_Te: - delta_Te[i]=0.; - } else { //We are in snow and don't have enough water, snow should be dry, so set back to initial values. - //NOTE: there is an issue to be solved here when Richard domain does not match snowpack domain (use of sublayers)!! - wateroverflow[i]+=(EMS[i].theta[WATER]-theta_d[i]); //This is water which stays or is taken out from the domain by this layer. 
- - EMS[i].theta[ICE]=snowpackBACKUPTHETAICE[i]; - EMS[i].theta[WATER]=snowpackBACKUPTHETAWATER[i]; - //Now we have checked everything, we make it fit between [0, 1]: to get rid off all round-off errors - EMS[i].theta[AIR]=std::max(0., std::min(1., EMS[i].theta[AIR])); - EMS[i].theta[WATER]=std::max(0., std::min(1., EMS[i].theta[WATER])); - EMS[i].theta[ICE]=std::max(0., std::min(1., EMS[i].theta[ICE])); + // Here is a very very crucial part. Here the water wil be either transferred to matrix or preferential domain. + // To determine the thresholds below, we use the water entry pressure, as provided in Eq. 15 in Hirashima et al. (2014). + i = uppernode + 1; + while (i-- > lowernode) { + if(enable_pref_flow) { + if(i>=Xdata.SoilNode) { // For snow + if(matrix) { + // First from matrix to preferential flow ... + if(i==Xdata.SoilNode) { + // First snow layer should not put water in soil directly + // Calculate threshold in the current layer that belongs to water entry pressure of the layer + const double dummy=EMS[i].VG.theta_s; + EMS[i].VG.theta_s=(1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)*(1.-EMS[i].PrefFlowArea); + const double matrix_threshold=std::max(0.001, EMS[i].VG.fromHtoTHETA((-1.*((0.0437 / EMS[i].ogs) + 0.01074)))); + EMS[i].VG.theta_s=dummy; + if(EMS[i].theta[WATER]>matrix_threshold) { + const double dtheta_w=std::max(0., (EMS[i].theta[WATER]-matrix_threshold)); + EMS[i].theta[WATER_PREF]+=dtheta_w; + EMS[i].theta[WATER]-=dtheta_w; + } + /*const double theta_thr=pref_threshold*pref_flowarea[i]; + if(EMS[i].theta[WATER_PREF]>theta_thr) { + EMS[i].theta[WATER]+=(EMS[i].theta[WATER_PREF]-theta_thr); + EMS[i].theta[WATER_PREF]=theta_thr; + }*/ + } else { + // Calculate threshold in the current layer that belongs to water entry pressure of the layer below + const double dummy=EMS[i].VG.theta_s; + EMS[i-1].VG.theta_s=(1.-EMS[i-1].theta[ICE])*(Constants::density_ice/Constants::density_water)*(1.-EMS[i-1].PrefFlowArea); + const 
double matrix_threshold=std::max(0.001, EMS[i-1].VG.fromHtoTHETA((-1.*((0.0437 / (pref_flow_param_heterogeneity_factor * EMS[i-1].ogs)) + 0.01074))-dz_up[i-1])); + EMS[i].VG.theta_s=dummy; + if(EMS[i].theta[WATER]>matrix_threshold) { + // Enforcing equal saturation between matrix part at [i] and preferential part at [i-1] + const double tmp_theta_water_tot = EMS[i].theta[WATER]*EMS[i].L + EMS[i-1].theta[WATER_PREF]*EMS[i-1].L; + const double s1=(1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)*(1.-EMS[i].PrefFlowArea); // theta_s matrix flow + const double s2=(1.-EMS[i-1].theta[ICE])*(Constants::density_ice/Constants::density_water)*(EMS[i-1].PrefFlowArea); // theta_s pref flow + const double dtheta_w_ideal = std::max(0., (-1.*(EMS[i].VG.theta_r*s2*EMS[i].L - tmp_theta_water_tot*s2) / (s2*EMS[i-1].L + (s1-EMS[i].VG.theta_r)*EMS[i].L) - EMS[i-1].theta[WATER_PREF])); + + const double dtheta_w=std::max(0., //No negative change! + std::min(std::max(dtheta_w_ideal, (EMS[i].theta[WATER]-matrix_threshold)*(EMS[i].L/EMS[i-1].L)) //The amount that is ideally transferred + , 0.999*( //Keep a bit of room + std::min((1.-EMS[i-1].theta[ICE])*(Constants::density_ice/Constants::density_water)*EMS[i-1].PrefFlowArea-EMS[i-1].theta[WATER_PREF], ((EMS[i].theta[WATER]-theta_d[i])*(EMS[i].L/EMS[i-1].L))) //Take MIN of: (i) Don't oversaturate preferential part, and (ii) don't take too much from the matrix part (TODO: actually, this should never happen.... remove it?) + ))); + if(WriteDebugOutput) printf("MATRIX->PREF [%d]: %f %f %f %f %f\n", int(i), EMS[i].theta[WATER], EMS[i].theta[WATER_PREF], EMS[i-1].theta[WATER], EMS[i-1].theta[WATER_PREF], dtheta_w); + EMS[i-1].lwc_source+=dtheta_w; // This works because preferential flow is executed after matrix flow, so the source/sink term will be used directly afterwards. 
+ EMS[i].theta[WATER]-=dtheta_w*(EMS[i-1].L/EMS[i].L); + // After moving the water, adjust the other properties + EMS[i].theta[AIR]=1.-EMS[i].theta[WATER]-EMS[i].theta[WATER_PREF]-EMS[i].theta[ICE]-EMS[i].theta[SOIL]; + EMS[i].updDensity(); + EMS[i].M=EMS[i].Rho*EMS[i].L; + EMS[i-1].theta[AIR]=1.-EMS[i-1].theta[WATER]-EMS[i-1].theta[WATER_PREF]-EMS[i-1].theta[ICE]-EMS[i-1].theta[SOIL]; + EMS[i-1].updDensity(); + EMS[i-1].M=EMS[i-1].Rho*EMS[i-1].L; + } + + // If the matrix pressure head is larger than the preferential flow pressure head (ensured by std::max(0., ....)), we equalize both domains in terms of saturation. + // This is because in wet snow, the preferential flow part is also wet. Moreover, it enables a smaller capillary suction in the preferential flow domain, and allows the water to flow downwards + const double tmp_theta_water_tot = EMS[i].theta[WATER] + EMS[i].theta[WATER_PREF]; + const double dtheta_w = std::max(0., (-1. * ( (EMS[i].VG.theta_r - tmp_theta_water_tot) * (1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)*(EMS[i].PrefFlowArea) ) / ((1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water) - EMS[i].VG.theta_r)) - EMS[i].theta[WATER_PREF]); + EMS[i].lwc_source+=dtheta_w; // This works because preferential flow is executed after matrix flow, so the source/sink term will be used directly afterwards. 
+ EMS[i].theta[WATER]-=dtheta_w; + EMS[i].theta[AIR]=1.-EMS[i].theta[WATER]-EMS[i].theta[WATER_PREF]-EMS[i].theta[ICE]-EMS[i].theta[SOIL]; + EMS[i].updDensity(); + EMS[i].M=EMS[i].Rho*EMS[i].L; + } + } else { + // Now from preferential to matrix flow + // Reinitialize the PF->matrix transfer variable + EMS[i].theta_w_transfer = 0.; + if(i==Xdata.SoilNode) { + //For the snow layer just above the soil, we equalize the saturation in the matrix and preferential flow domain + //This leads to more realistic snowpack runoff behavior, as with the approach for the other snow layers, spiky behavior arises from whether or not pref_threshold is exceeded + const double tmp_theta_water_tot = EMS[i].theta[WATER] + EMS[i].theta[WATER_PREF]; + //Note that the std::max(0., ...) ensures that the water flow is from preferential flow to matrix flow domain + const double dtheta_w2 = std::max(0., EMS[i].theta[WATER_PREF] + ( (EMS[i].VG.theta_r - tmp_theta_water_tot) * (1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)*(EMS[i].PrefFlowArea) ) / ((1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water) - EMS[i].VG.theta_r)); + //const double dtheta_w2 = std::min(EMS[i].theta[WATER_PREF], std::max(0., EMS[i].theta[WATER_PREF] + ( (EMS[i].VG.theta_r - tmp_theta_water_tot) * (1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)*(EMS[i].PrefFlowArea) ) / ((1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water) - EMS[i].VG.theta_r))); + EMS[i].theta[WATER_PREF] -= dtheta_w2; + EMS[i].theta[WATER] += dtheta_w2; + // Increment the PF->matrix transfer variable + EMS[i].theta_w_transfer += dtheta_w2; + } else { + // For other snow layers than the lowest snow layer above the soil + if(EMS[i].theta[WATER_PREF]/((1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)*(EMS[i].PrefFlowArea)) > pref_flow_param_th) { + // Using the code from PhaseChange.cc, we estimate the refreezing capacity + const double deltaT = 
EMS[i].meltfreeze_tk - EMS[i].Te; + // Adapt A to compute mass changes + double A = (EMS[i].c[TEMPERATURE] * EMS[i].Rho) / ( Constants::density_ice * Constants::lh_fusion ); + // Compute the change in volumetric ice and water contents + const double dth_i = - A * deltaT; + const double dth_w = std::min(EMS[i].theta[WATER_PREF]-theta_d[i], - (Constants::density_ice / Constants::density_water) * dth_i); + const double dtheta_w1 = std::max(0., //No negative change! + std::min((dth_w) //The amount that is ideally transferred + , 0.999*( //Keep a bit of room + (1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)*(1.-EMS[i].PrefFlowArea)-EMS[i].theta[WATER]) //Don't over-saturate matrix part + )); + + EMS[i].theta[WATER]+=dtheta_w1; + EMS[i].theta[WATER_PREF]-=dtheta_w1; + // Increment the PF->matrix transfer variable + EMS[i].theta_w_transfer += dtheta_w1; + + if(EMS[i].theta[WATER_PREF]/((1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)*(EMS[i].PrefFlowArea)) > pref_flow_param_th) { + // This approach is equalizing both domains in case we still exceed the threshold: + const double tmp_theta_water_tot = EMS[i].theta[WATER] + EMS[i].theta[WATER_PREF]; + const double dtheta_w2 = std::max(0., EMS[i].theta[WATER_PREF] + ( (EMS[i].VG.theta_r - tmp_theta_water_tot) * (1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water)*(EMS[i].PrefFlowArea) ) / ((1.-EMS[i].theta[ICE])*(Constants::density_ice/Constants::density_water) - EMS[i].VG.theta_r)); + EMS[i].theta[WATER_PREF] -= dtheta_w2; + EMS[i].theta[WATER] += dtheta_w2; + // Increment the PF->matrix transfer variable + EMS[i].theta_w_transfer += dtheta_w2; + } + } + const double dx = sqrt((1. + EMS[i].PrefFlowArea)/(2. 
* Constants::pi)) - sqrt(EMS[i].PrefFlowArea/Constants::pi); // Estimate of the typical length scale that determines the gradients + + // Now consider refreeze due to temperature difference (mimicked by transferring water from preferential flow to matrix domain) + const double heat_flux = ((Constants::meltfreeze_tk - EMS[i].Te) / dx) * EMS[i].k[TEMPERATURE]; // Units: [W/m^2], Eq. 6 in Wever et al. (2016), TC. Note that the paper reports wrong units here. + const double theta_move = (pref_flow_param_N * 2. * sqrt(EMS[i].PrefFlowArea * Constants::pi) * heat_flux * sn_dt) / Constants::lh_fusion / Constants::density_water; // Units: [m^3/m^3], Eq. 7 in Wever et al. (2016), TC. Note that the paper reports a different and wrong equation here, which is not consistent with the units. + + // Make sure that theta[WATER_PREF] is not negative and do the actual transfer! + const double dtheta_w3 = std::max(0., std::min(std::min(EMS[i].theta[WATER_PREF]-theta_d[i], 0.999*(1.-EMS[i-1].theta[ICE])*(Constants::density_ice/Constants::density_water)*(1.-EMS[i-1].PrefFlowArea)-EMS[i].theta[WATER]), theta_move)); + EMS[i].theta[WATER]+=dtheta_w3; + EMS[i].theta[WATER_PREF]-=dtheta_w3; + // Increment the PF->matrix transfer variable + EMS[i].theta_w_transfer += dtheta_w3; + } + // Check for first wetting to set microstructural marker correctly only if not ice reservoir + if (!enable_ice_reservoir) { + // Check for first wetting to set microstructural marker correctly + if ((EMS[i].theta[WATER] > 5e-6 * sn_dt) && (EMS[i].mk%100 < 10)) { + EMS[i].mk += 10; + } + } + } + } else { // For soil, we suppress preferential flow + const double pref_threshold=0.; + if(EMS[i].theta[WATER_PREF]>pref_threshold) { + EMS[i].theta[WATER]+=(EMS[i].theta[WATER_PREF]-pref_threshold); + EMS[i].theta[WATER_PREF]=pref_threshold; + } + if(EMS[i].theta[WATER_PREF] 1.+Constants::eps2 || EMS[i].theta[ICE]<0.-Constants::eps2 || EMS[i].theta[ICE] > 1.+Constants::eps2) { - printf("ERROR at layer %d: sum=%f air=%f 
ice=%f soil=%f water=%f\n", i, sum, EMS[i].theta[AIR], EMS[i].theta[ICE], EMS[i].theta[SOIL], EMS[i].theta[WATER]); + const double sum=EMS[i].theta[AIR] + EMS[i].theta[WATER] + EMS[i].theta[WATER_PREF] + EMS[i].theta[ICE] + EMS[i].theta[SOIL]; + if(EMS[i].theta[WATER]<0.-Constants::eps2 || EMS[i].theta[WATER_PREF]<0.-Constants::eps2 || EMS[i].theta[AIR]<0.-Constants::eps2 || EMS[i].theta[AIR] > 1.+Constants::eps2 || EMS[i].theta[ICE]<0.-Constants::eps2 || EMS[i].theta[ICE] > 1.+Constants::eps2) { + printf("ERROR in [%d] at layer %d: sum=%f air=%f ice=%f soil=%f water=%f water_pref=%f\n", (matrix)?(1):(0), int(i), sum, EMS[i].theta[AIR], EMS[i].theta[ICE], EMS[i].theta[SOIL], EMS[i].theta[WATER], EMS[i].theta[WATER_PREF]); printf(" -- if this happens and ice<0, check theta_d. Maybe there was so much water created, that it was more than there was ice. This is not accounted for.\n"); throw; } if(sum > 1.+Constants::eps2) { - printf("ERROR at layer %d: sum=%f air=%f ice=%f soil=%f water=%f\n", i, sum, EMS[i].theta[AIR], EMS[i].theta[ICE], EMS[i].theta[SOIL], EMS[i].theta[WATER]); + printf("ERROR in [%d] at layer %d: sum=%f air=%f ice=%f soil=%f water=%f water_pref=%f\n", (matrix)?(1):(0), int(i), sum, EMS[i].theta[AIR], EMS[i].theta[ICE], EMS[i].theta[SOIL], EMS[i].theta[WATER], EMS[i].theta[WATER_PREF]); throw; } if(sum < 1.-Constants::eps2) { - printf("ERROR at layer %d: sum=%f air=%f ice=%f soil=%f water=%f\n", i, sum, EMS[i].theta[AIR], EMS[i].theta[ICE], EMS[i].theta[SOIL], EMS[i].theta[WATER]); + printf("ERROR in [%d] at layer %d: sum=%f air=%f ice=%f soil=%f water=%f water_pref=%f\n", (matrix)?(1):(0), int(i), sum, EMS[i].theta[AIR], EMS[i].theta[ICE], EMS[i].theta[SOIL], EMS[i].theta[WATER], EMS[i].theta[WATER_PREF]); throw; } } - for (i = toplayer-1; i >= 0; i--) { //We loop over all SNOWPACK layers ... 
- //Heat advection by water flow - double deltaN=0.; - if(i == int(nE)-1) { //HACK, TODO: remove type inconstency in comparison - deltaN=(delta_Te_adv[i] * (EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L)) / (EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L + 0.5*EMS[i-1].c[TEMPERATURE]*EMS[i-1].Rho*EMS[i-1].L); - } else { - if(i==0) { - deltaN=(delta_Te_adv[i] * (EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L)) / (0.5*EMS[i+1].c[TEMPERATURE]*EMS[i+1].Rho*EMS[i+1].L + EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L); + i = uppernode + 1; + while (i-- > lowernode) { + if(nE > 1) { + //Heat advection by water flow + double deltaN=0.; + if(i == nE-1 && i > 0) { + deltaN=(delta_Te_adv[i] * (EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L)) / (EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L + 0.5*EMS[i-1].c[TEMPERATURE]*EMS[i-1].Rho*EMS[i-1].L); } else { - deltaN=(delta_Te_adv[i] * (EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L)) / (0.5*EMS[i+1].c[TEMPERATURE]*EMS[i+1].Rho*EMS[i+1].L + EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L + 0.5*EMS[i-1].c[TEMPERATURE]*EMS[i-1].Rho*EMS[i-1].L); + if(i==0) { + deltaN=(delta_Te_adv[i] * (EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L)) / (0.5*EMS[i+1].c[TEMPERATURE]*EMS[i+1].Rho*EMS[i+1].L + EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L); + } else { + deltaN=(delta_Te_adv[i] * (EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L)) / (0.5*EMS[i+1].c[TEMPERATURE]*EMS[i+1].Rho*EMS[i+1].L + EMS[i].c[TEMPERATURE]*EMS[i].Rho*EMS[i].L + 0.5*EMS[i-1].c[TEMPERATURE]*EMS[i-1].Rho*EMS[i-1].L); + } } + EMS[i].Qmf += (deltaN * EMS[i].c[TEMPERATURE] * EMS[i].Rho) / sn_dt; + } else { + // If there is only 1 element, we don't care about heat advection... } - NDS[i+1].T+=deltaN; - NDS[i].T+=deltaN; - if(fabs(deltaN)>0.) 
{ - if(i < int(nE)-1) EMS[i+1].Te=0.5*(NDS[i+2].T+NDS[i+1].T); //HACK, TODO: remove type inconstency in comparison - EMS[i].Te=0.5*(NDS[i+1].T+NDS[i].T); - if(i > 0) EMS[i-1].Te=0.5*(NDS[i].T+NDS[i-1].T); - } - if (WriteOutNumerics_Level2==true) printf("SENDING at layer %d: sum=%f air=%.15f ice=%.15f soil=%.15f water=%.15f Te=%.15f\n", i, EMS[i].theta[AIR]+EMS[i].theta[ICE]+EMS[i].theta[SOIL]+EMS[i].theta[WATER], EMS[i].theta[AIR], EMS[i].theta[ICE], EMS[i].theta[SOIL], EMS[i].theta[WATER], EMS[i].Te); - } - - double totalwateroverflow=0.; //Total water outflow due to numerical issues (requiring minimum theta_r, maximum theta_s, etc), in m^3/m^2 - for (i = uppernode; i>=lowernode; i--) { - totalwateroverflow+=wateroverflow[i]*dz[i]; - if(i==nsoillayers_richardssolver) { - // I decided to put all wateroverflow from snow directly in the snowsoilinterfaceflux, although the wateroverflow may occur somewhere in the snowpack. - snowsoilinterfaceflux1+=totalwateroverflow; - } + if (WriteDebugOutput) printf("SENDING at layer %d: sum=%f air=%.15f ice=%.15f soil=%.15f water=%.15f water_pref=%.15f Te=%.15f h=%f %f\n", int(i), EMS[i].theta[AIR]+EMS[i].theta[ICE]+EMS[i].theta[SOIL]+EMS[i].theta[WATER]+EMS[i].theta[WATER_PREF], EMS[i].theta[AIR], EMS[i].theta[ICE], EMS[i].theta[SOIL], EMS[i].theta[WATER], EMS[i].theta[WATER_PREF], EMS[i].Te, EMS[i].h, EMS[i].VG.fromTHETAtoH(EMS[i].theta[WATER], h_d)); } - if(WriteOutNumerics_Level1==true) { - printf("ACTUALTOPFLUX: [ BC: %d ] %.15f %.15f %.15f CHK: %.15f %f\n", TopBC, actualtopflux/snowpack_dt, refusedtopflux/snowpack_dt, surfacefluxrate, actualtopfluxcheck/snowpack_dt, (surfacefluxrate!=0.)?(actualtopflux/snowpack_dt)/surfacefluxrate:0.); - printf("ACTUALBOTTOMFLUX: [ BC: %d ] %.15f %.15f %.15f %f K_ip1=%.15f\n", BottomBC, actualbottomflux, actualbottomflux/snowpack_dt, BottomFluxRate, (BottomFluxRate!=0.)?(actualbottomflux/snowpack_dt)/BottomFluxRate:0., k_np1_m_ip12[lowernode]); - // This is more or less for testing only. 
This snowsoilinterfaceflux should anyway be stored in MS_SNOWPACK_RUNOFF and found in the met file - printf("SNOWSOILINTERFACEFLUX: %.15f %.15f\n", snowsoilinterfaceflux1/snowpack_dt, snowsoilinterfaceflux2/snowpack_dt); + if(WriteDebugOutput) { + printf("ACTUALTOPFLUX: [ BC: %d ] %.15f %.15f %.15f CHK: %f\n", TopBC, actualtopflux/sn_dt, refusedtopflux/sn_dt, surfacefluxrate, (surfacefluxrate!=0.)?(actualtopflux/sn_dt)/surfacefluxrate:0.); + printf("ACTUALBOTTOMFLUX: [ BC: %d ] %.15f %.15f %.15f %f SNOWSOILINTERFACEFLUX=%.15f\n", BottomBC, actualbottomflux, actualbottomflux/sn_dt, BottomFluxRate, (BottomFluxRate!=0.)?(actualbottomflux/sn_dt)/BottomFluxRate:0., snowsoilinterfaceflux/sn_dt); } - if(WriteOutNumerics_Level0==true) printf("WATERBALANCE: %.15f %.15f %.15f CHK1: %.15f CHK2: %.15f WATEROVERFLOW: %.15f MB_ERROR: %.15f\n", actualtopflux/snowpack_dt, refusedtopflux/snowpack_dt, surfacefluxrate, (surfacefluxrate!=0.)?(actualtopflux/snowpack_dt)/surfacefluxrate:0., actualtopfluxcheck/snowpack_dt, totalwateroverflow, massbalanceerror_sum); + if(WriteDebugOutput) printf("WATERBALANCE: %.15f %.15f %.15f CHK1: %.15f MB_ERROR: %.15f\n", actualtopflux/sn_dt, refusedtopflux/sn_dt, surfacefluxrate, (surfacefluxrate!=0.)?(actualtopflux/sn_dt)/surfacefluxrate:0., massbalanceerror_sum); //Update soil runoff (mass[MS_SOIL_RUNOFF] = kg/m^2). Note: it does not matter whether SNOWPACK is run with soil or not. MS_SOIL_RUNOFF is always runoff from lower boundary. Sdata.mass[SurfaceFluxes::MS_SOIL_RUNOFF] += actualbottomflux*Constants::density_water; // Update snow pack runoff (mass[MS_SNOWPACK_RUNOFF] = kg/m^2 (almost equal to mm/m^2), surfacefluxrate=m^3/m^2/s and snowsoilinterfaceflux = m^3/m^2): // NOTE: snowsoilinterfaceflux will only be non-zero IF there is a snowpack AND we solve the richards equation also for snow! Else, snowpack runoff is calculated in the original WaterTransport functions. 
- Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += snowsoilinterfaceflux1*Constants::density_water; + Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += snowsoilinterfaceflux*Constants::density_water; + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += snowsoilinterfaceflux*Constants::density_water; //Deal with the situation that evaporation flux was limited in case of snow. Then, sublimate ice matrix. - if (refusedtopflux<0. && toplayer>nsoillayers_snowpack) { + if (refusedtopflux<0. && uppernode+1>=Xdata.SoilNode) { //Be careful: refusedtopflux = m^3/m^2 and not m^3/m^2/s!!! - //Now invert the calculation of ql, using refusedtopflux. This amount of ql should be used for sublimation. - double ql=(refusedtopflux/sn_dt)*Constants::density_water*Constants::lh_vaporization; - - double dL=0.; - std::vector M_Solutes(Xdata.number_of_solutes, 0.); // Mass of solutes from disappearing phases - size_t e = nE-1; - while ((e >= Xdata.SoilNode) && (ql < -Constants::eps2)) { // While energy is available and we are in snow - const double L0 = EMS[e].L; - // If there is no water or if there was not enough water ... - // Note: as we do not pass through mergeElements anymore, we must assure that elements do not disappear here. - // By specifying a minimum value just below the Snowpack::min_ice_content, we make sure the element gets removed the next time it passes mergeElements. 
- const double theta_i0 = std::max(0., EMS[e].theta[ICE] - (0.99*Snowpack::min_ice_content)); - double M = theta_i0*Constants::density_ice*L0; - double dM = ql*sn_dt/Constants::lh_sublimation; - if (-dM > M) dM = -M; - - dL = dM/(EMS[e].Rho); - if (e < Xdata.SoilNode) { - dL = 0.; - } - NDS[e+1].z += dL; EMS[e].L0 = EMS[e].L = L0 + dL; - NDS[e+1].z += NDS[e+1].u; NDS[e+1].u = 0.0; - - EMS[e].E = EMS[e].Eps = EMS[e].dEps = EMS[e].Eps_e = EMS[e].Eps_v = EMS[e].S = 0.0; - EMS[e].theta[ICE] *= L0/EMS[e].L; - EMS[e].theta[ICE] += dM/(Constants::density_ice*EMS[e].L); - EMS[e].theta[WATER] *= L0/EMS[e].L; - for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { - EMS[e].conc[ICE][ii] *= L0*theta_i0/(EMS[e].theta[ICE]*EMS[e].L); - } - - EMS[e].M += dM; - // Instead of evaporating, we sublimate the ice matrix: - Sdata.mass[SurfaceFluxes::MS_EVAPORATION] -= dM*(Constants::lh_sublimation/Constants::lh_vaporization); //Correct evaporation for sublimated mass - Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += dM; //Add mass to sublimation - ql -= dM*Constants::lh_sublimation/sn_dt; // Update the energy used - - //Update volumetric contents - EMS[e].theta[AIR]=1.-EMS[e].theta[ICE]-EMS[e].theta[WATER]-EMS[e].theta[SOIL]; - EMS[e].Rho = (EMS[e].theta[ICE] * Constants::density_ice) + (EMS[e].theta[WATER] * Constants::density_water) + (EMS[e].theta[SOIL] * EMS[e].soil[SOIL_RHO]); - EMS[e].heatCapacity(); - - e--; + //Now invert the calculation of ql, using refusedtopflux. This amount of ql could not be used for evaporation and should be used for sublimation. + ql += (refusedtopflux/sn_dt)*Constants::density_water*Constants::lh_vaporization; + refusedtopflux = 0.; + //First, we fully intepreted ql as evaporation. 
Now, remaining energy (ql) should not be counted as evaporation + Sdata.mass[SurfaceFluxes::MS_EVAPORATION] -= ql*sn_dt/Constants::lh_vaporization; + if(Xdata.swe < Constants::eps2) { + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] -= ql*sn_dt/Constants::lh_vaporization; + } + if(uppernode+1==Xdata.SoilNode) { + //The energy is substracted from the top element + //const double tmp_delta_Te = ql / (EMS[Xdata.SoilNode-1].c[TEMPERATURE] * EMS[Xdata.SoilNode-1].Rho); + //NDS[Xdata.SoilNode].T += 2.*tmp_delta_Te; + //EMS[Xdata.SoilNode-1].Te += tmp_delta_Te; } - //Remaining energy should go back again into refusedtopflux and also should not be counted as evaporation - Sdata.mass[SurfaceFluxes::MS_EVAPORATION]-=ql*sn_dt/Constants::lh_vaporization; - refusedtopflux=std::min(0., (ql*sn_dt)/(Constants::density_water*Constants::lh_vaporization)); - } - if(refusedtopflux<0. && toplayer==nsoillayers_snowpack) { - //Be careful: refusedtopflux = m^3/m^2 and not m^3/m^2/s!!! - //Now invert the calculation of ql, using refusedtopflux. This amount of ql should be used for sublimation. - double ql=(refusedtopflux/sn_dt)*Constants::density_water*Constants::lh_vaporization; - refusedtopflux=0.; - //Remaining energy should not be counted as evaporation - Sdata.mass[SurfaceFluxes::MS_EVAPORATION]-=ql*sn_dt/Constants::lh_vaporization; - //The energy is substracted from the top element - const double tmp_delta_Te = ql / (EMS[nsoillayers_snowpack-1].c[TEMPERATURE] * EMS[nsoillayers_snowpack-1].Rho); - NDS[nsoillayers_snowpack].T += 2.*tmp_delta_Te; - EMS[nsoillayers_snowpack-1].Te += tmp_delta_Te; } //If we could not handle all incoming water at top boundary AND we have snow AND we solve RE for snow: - if(refusedtopflux>0. && toplayer>int(Xdata.SoilNode)) { + if(refusedtopflux>0. 
&& uppernode+1>Xdata.SoilNode) { Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += refusedtopflux*Constants::density_water; + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += refusedtopflux*Constants::density_water; } - //If we could not handle all snowpack runoff when not modelling snow with RE: - if(refusedtopflux>0. && toplayer==int(Xdata.SoilNode) && Xdata.getNumberOfElements()>Xdata.SoilNode ){ - Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += refusedtopflux*Constants::density_water; + + //If we could not handle all snowpack runoff when not modelling snow with RE, add water layer + if(allow_surface_ponding == true && refusedtopflux > Constants::eps) { + Xdata.resize(nE+1); + const size_t newnE = Xdata.getNumberOfElements(); + Xdata.Edata[newnE-1] = Xdata.Edata[Xdata.getNumberOfElements()-2]; + Xdata.Ndata[Xdata.getNumberOfNodes()-1] = Xdata.Ndata[Xdata.getNumberOfNodes()-2]; + Xdata.Edata[newnE-1].theta[WATER] = 1.; + Xdata.Edata[newnE-1].theta[WATER_PREF] = 0.; + Xdata.Edata[newnE-1].theta[ICE] = 0.; + Xdata.Edata[newnE-1].theta[AIR] = 0.; + Xdata.Edata[newnE-1].theta[SOIL] = 0.; + Xdata.Edata[newnE-1].L = Xdata.Edata[newnE-1].L0 = refusedtopflux; + EMS[newnE-1].updDensity(); + EMS[newnE-1].M = EMS[newnE-1].L*EMS[newnE-1].Rho; + EMS[newnE-1].Te = (backupWATERLAYER_Te != Constants::undefined) ? (backupWATERLAYER_Te) : NDS[newnE-2].T; + NDS[newnE].T = NDS[newnE-1].T = NDS[newnE-2].T = EMS[newnE-1].Te; + Xdata.Edata[newnE-1].mk = 19; // Mark the layer as a water layer + prn_msg( __FILE__, __LINE__, "wrn", date, "Ponding occuring, water layer added! [depth = %lf m]", Xdata.Edata[newnE-1].L); } - //We want wateroverflow in the snow to be a source/sink term. Therefore, these lines are inactive. - //if(totalwateroverflow>0. && toplayer>Xdata.SoilNode) { - // Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += totalwateroverflow*Constants::density_water; - //} - surfacefluxrate=0.; //As we now have used the rate for the current time step, reset the value. 
+ surfacefluxrate=0.; //As we now have used the rate for the current time step, reset the value. - //Now calculate freezing point depression: - for (i = 0; i<=toplayer-1; i++) { - if(EMS[i].theta[SOIL]calculateMeltingTemperature(EMS[i].salinity)):(Constants::meltfreeze_tk)); + } else { //For soil layers solved with Richards Equation, everything (water transport and phase change) is done in this routine, except calculating the heat equation. //To suppress phase changes in PhaseChange.cc, set the melting and freezing temperature equal to the element temperature: - EMS[i].freezing_tk=EMS[i].melting_tk=EMS[i].Te; - } else { - EMS[i].freezing_tk=EMS[i].melting_tk=T_melt[i]; - //This is a trick. Now that we deal with phase change in soil right here, we set the melting and freezing temperatures equal to the current Element temperature, so that - //in CompTemperatures, the element temperature will not be adjusted to freezing temperature just because there is water in it! - EMS[i].Te=std::max(EMS[i].Te, T_0); //Because we don't allow soil freezing, soil remains 0 degC. 
- NDS[i].T=std::max(NDS[i].T, T_0); - NDS[i+1].T=std::max(NDS[i+1].T, T_0); - EMS[i].melting_tk=EMS[i].Te; - EMS[i].freezing_tk=EMS[i].Te; + EMS[i].meltfreeze_tk=std::min(Constants::meltfreeze_tk, EMS[i].Te); } } - if(WriteOutNumerics_Level2==true) - std::cout << "EMS[" << i << "].melting_tk = " << EMS[i].melting_tk << ", EMS[" << i << "].freezing_tk = " << EMS[i].freezing_tk << " (ice: " << EMS[i].theta[ICE] << ")\n"; } - //print solver statistics - if(WriteOutNumerics_Level0==true) { - std::cout << "SOLVERSTATISTICS: max_dt= " << std::setprecision(5) << stats_max_dt << " min_dt= " << std::setprecision(20) << stats_min_dt << std::setprecision(6) << " nsteps_total= " << stats_nsteps << " niter_total= " << stats_niters << " nrewinds_total= " << stats_nrewinds << " Last active solver: "; - switch (ActiveSolver) { - case DGESVD: - std::cout << "DGESVD/DGESDD."; - break; - case DGTSV: - std::cout << "DGTSV."; - break; - case TDMA: - std::cout << "TDMA."; - break; - } - std::cout << " BS_avg_iter: " << double(double(bs_stats_totiter)/double(stats_niters*nsoillayers_richardssolver)) << " BS_max_iter: " << bs_stats_maxiter << "\n"; - } + return; } #ifdef __clang__ diff --git a/third_party/snowpack/snowpackCore/ReSolver1d.h b/third_party/snowpack/snowpackCore/ReSolver1d.h index 89a756a5..be82411b 100644 --- a/third_party/snowpack/snowpackCore/ReSolver1d.h +++ b/third_party/snowpack/snowpackCore/ReSolver1d.h @@ -25,68 +25,81 @@ #ifndef RESOLVER1D_H #define RESOLVER1D_H +#include "SalinityTransport.h" #include "../DataClasses.h" -#include "../SnowpackConfig.h" -#include -#include /** * @class ReSolver1d - * @version 10.02 * @author Nander Wever - * @bug Prone to bugs at any changes! Be aware! 
* @brief This module contains the solver for the 1d Richards Equation for the 1d snowpack model */ class ReSolver1d { public: - ReSolver1d(const SnowpackConfig& cfg); // Class constructor - void SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata); + ReSolver1d(const SnowpackConfig& cfg, const bool& matrix_part); // Class constructor + void SolveRichardsEquation(SnowStation& Xdata, SurfaceFluxes& Sdata, double& ql, const mio::Date& date); + + double surfacefluxrate; // Surfacefluxrate for solving RE. It is either surface of snow, in case of snowpack and solving RE for snow, or surface of soil, when no snowpack and/or solving RE only for soil. + double soilsurfacesourceflux; // Soilsurfacesourceflux for solving RE. This is used when we use RE for snow AND there is a snowpack AND the lowest snow element is removed. - double surfacefluxrate; // Surfacefluxrate for solving RE. It is either surface of snow, in case of snowpack and solving RE for snow, or surface of soil, when no snowpack and/or solving RE only for soil. - double soilsurfacesourceflux; // Soilsurfacesourceflux for solving RE. This is used when we use RE for snow AND there is a snowpack AND the lowest snow element is removed. + const static double max_theta_ice; // Maximum allowed theta[ICE]. RE always need some pore space, which is defined by this value. 
+ const static double REQUIRED_ACCURACY_THETA; + // Solvers + static int TDMASolver (size_t n, double *a, double *b, double *c, double *v, double *x); // Thomas algorithm for tridiagonal matrices + static int pinv(int m, int n, int lda, double *a); // Full matrix inversion private: std::string variant; //To prevent string comparisons, we define an enumerated list: enum watertransportmodels{UNDEFINED, BUCKET, NIED, RICHARDSEQUATION}; - //Soil types - enum SoilTypes{ORGANIC, CLAY, CLAYLOAM, LOAM, LOAMYSAND, SAND, SANDYCLAY, SANDYCLAYLOAM, SANDYLOAM, SILT, SILTYCLAY, SILTYCLAYLOAM, SILTLOAM, WFJGRAVELSAND}; - //Hydraulic conductivity parameterizations - enum K_Parameterizations{SHIMIZU, CALONNE}; //K_Average types - enum K_AverageTypes{ARITHMETICMEAN, GEOMETRICMEAN, HARMONICMEAN, MINIMUMVALUE, UPSTREAM}; - //Van genuchten model types - enum VanGenuchten_ModelTypesSnow{YAMAGUCHI2012, YAMAGUCHI2010, YAMAGUCHI2010_ADAPTED, DAANEN}; + enum K_AverageTypes{ARITHMETICMEAN, LOGMEAN, GEOMETRICMEAN, HARMONICMEAN, MINIMUMVALUE, UPSTREAM}; + //K_frozen_soil types + enum K_frozen_soilTypes{IGNORE, OMEGA, LIQUIDPORESPACE}; //Solvers enum SOLVERS{DGESVD, DGTSV, TDMA}; //Boundary conditions - enum BoundaryConditions{DIRICHLET, NEUMANN, LIMITEDFLUXEVAPORATION, LIMITEDFLUXINFILTRATION, LIMITEDFLUX, WATERTABLE, FREEDRAINAGE, GRAVITATIONALDRAINAGE, SEEPAGEBOUNDARY}; - - + enum BoundaryConditions{DIRICHLET, NEUMANN, LIMITEDFLUXEVAPORATION, LIMITEDFLUXINFILTRATION, LIMITEDFLUX, WATERTABLE, FREEDRAINAGE, GRAVITATIONALDRAINAGE, SEEPAGEBOUNDARY, SEAICE}; + //Salinity mixing models + enum SalinityMixingModels{NONE, CAPILLARY_GRAVITY, DENSITY_DIFFERENCE, DENSITY_GRAVITY}; + watertransportmodels iwatertransportmodel_snow, iwatertransportmodel_soil; std::string watertransportmodel_snow; std::string watertransportmodel_soil; - BoundaryConditions BottomBC; //Bottom boundary condition (recommended choice either DIRICHLET with saturation (lower boundary in water table) or FREEDRAINAGE (lower boundary 
not in water table)) - K_AverageTypes K_AverageType; //Implemented choices: ARITHMETICMEAN (recommended), HARMONICMEAN, GEOMETRICMEAN, MINIMUMVALUE, UPSTREAM + BoundaryConditions BottomBC; //Bottom boundary condition (recommended choice either DIRICHLET with saturation (lower boundary in water table) or FREEDRAINAGE (lower boundary not in water table)) + K_AverageTypes K_AverageType; //Implemented choices: ARITHMETICMEAN (recommended), LOGMEAN, GEOMETRICMEAN, HARMONICMEAN, MINIMUMVALUE, UPSTREAM + K_frozen_soilTypes K_frozen_soilType; //Implemented choices: IGNORE (recommended), OMEGA, LIQUIDPORESPACE + double omega; //The value of omega to use when K_frozen_soilType == OMEGA. + bool enable_pref_flow; //true: dual domain approach, false: classic Richards equation. + double pref_flow_param_th; //Tuning parameter: saturation threshold in preferential flow + double pref_flow_param_N; //Tuning parameter: number of preferential flow paths for heat exchange + double pref_flow_param_heterogeneity_factor; //Tuning parameter: heterogeneity factor for grain size + bool enable_ice_reservoir; // Ice reservoir or not + bool runSoilInitializer; // Run the function that initializes the soil in thermal equilibrium upon first function call - double sn_dt; - bool useSoilLayers, water_layer; + double sn_dt; //SNOWPACK time step + bool allow_surface_ponding; //boolean to switch on/off the formation of surface ponds in case prescribed infiltration flux exceeds matrix capacity + bool matrix; //boolean to define if water transport is calculated for matrixflow or preferential flow + SalinityTransport::SalinityTransportSolvers SalinityTransportSolver; //How to solve salinity transport? 
+ // Grid info + std::vector dz; //Layer height (in meters) + std::vector z; //Height above the surface (so -1 is 1m below surface) + std::vector dz_up; //Distance to upper node (in meters) + std::vector dz_down; //Distance to lower node (in meters) + std::vector dz_; //Layer distance for the finite differences, see Rathfelder (2004). - // Van Genuchten functions - double fromTHETAtoH(double theta, double theta_r, double theta_s, double alpha, double m, double n, double Sc, double h_e, double h_d); - double fromTHETAtoHforICE(double theta, double theta_r, double theta_s, double alpha, double m, double n, double Sc, double h_e, double h_d, double theta_i); - double fromHtoTHETA(double h, double theta_r, double theta_s, double alpha, double m, double n, double Sc, double h_e); - double fromHtoTHETAforICE(double h, double theta_r, double theta_s, double alpha, double m, double n, double Sc, double h_e, double theta_i); - double AirEntryPressureHead(double MaximumPoreSize, double Temperature); - void SetSoil(SoilTypes type, double *theta_r, double *theta_s, double *alpha, double *m, double *n, double *ksat, double *he); + // General functions + void InitializeGrid(const std::vector& EMS, const size_t& lowernode, const size_t& uppernode); + std::vector AssembleRHS(const size_t& lowernode, const size_t& uppernode, const std::vector& h_np1_m, const std::vector& theta_n, const std::vector& theta_np1_m, const std::vector& theta_i_n, const std::vector& theta_i_np1_m, const std::vector& s, const double& dt, const std::vector& rho, const std::vector& k_np1_m_im12, const std::vector& k_np1_m_ip12, const BoundaryConditions aTopBC, const double& TopFluxRate, const BoundaryConditions aBottomBC, const double& BottomFluxRate, const SnowStation& Xdata, SalinityTransport& Salinity, const SalinityMixingModels& SALINITY_MIXING); - // Solvers - int TDMASolver (int n, double *a, double *b, double *c, double *v, double *x); - int pinv(int m, int n, int lda, double *a); + // Solver control 
variables + const static double REQUIRED_ACCURACY_H, convergencecriterionthreshold, MAX_ALLOWED_DELTA_H; + const static size_t INCR_ITER, DECR_ITER, MAX_ITER, BS_MAX_ITER; + const static double MIN_VAL_TIMESTEP, MAX_VAL_TIMESTEP, MIN_DT_FOR_INFILTRATION; + const static double SF_epsilon; }; -#endif //End of WaterTransport.h +#endif //End of ReSolver1d.h diff --git a/third_party/snowpack/snowpackCore/SalinityTransport.cc b/third_party/snowpack/snowpackCore/SalinityTransport.cc new file mode 100644 index 00000000..76046e9f --- /dev/null +++ b/third_party/snowpack/snowpackCore/SalinityTransport.cc @@ -0,0 +1,489 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND +*/ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . 
+*/ + +#include "SalinityTransport.h" +#include "ReSolver1d.h" +#include "../Utils.h" +#include + +static const bool ZeroFluxLowerBoundary_diffusion = false; +static const bool ZeroFluxUpperBoundary_diffusion = true; +static const bool ZeroFluxLowerBoundary_advection = false; +static const bool ZeroFluxUpperBoundary_advection_in = false; // For incoming flux: set to false would reflect fresh water influx in case of rain or condensation +static const bool ZeroFluxUpperBoundary_advection_out = true; // For outgoing flux: set to true would reflect that evaporation would only consist of fresh water + +#ifdef CLAPACK + // Matching C data types with FORTRAN data types (taken from f2c.h): + typedef long int integer; + typedef double doublereal; + + // Declare the function interfaces with the LAPACK library (taken from clapack.h): + extern "C" { + /* Subroutine */ int dgesvd_(char *jobu, char *jobvt, integer *m, integer *n, + doublereal *a, integer *lda, doublereal *s, doublereal *u, integer * + ldu, doublereal *vt, integer *ldvt, doublereal *work, integer *lwork, + integer *info); + + /* Subroutine */ int dgesdd_(char *jobz, integer *m, integer *n, doublereal * + a, integer *lda, doublereal *s, doublereal *u, integer *ldu, + doublereal *vt, integer *ldvt, doublereal *work, integer *lwork, + integer *iwork, integer *info); + + /* Subroutine */ int dgtsv_(integer *n, integer *nrhs, doublereal *dl, + doublereal *d__, doublereal *du, doublereal *b, integer *ldb, integer + *info); + } +#endif + + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wsign-conversion" +#endif + + +/** + * @brief Class for solving diffusion-advection equation for salinity using the Crank-Nicolson implicit method\n + * Solve Richards Equation \n + * @author Nander Wever + * @param nE Domain size (number of elements) + */ +SalinityTransport::SalinityTransport(const size_t nE) + : flux_up(), flux_down(), flux_up_2(), flux_down_2(),dz_(), dz_up(), dz_down(), theta1(), 
theta2(), BrineSal(), D(), sb(), BottomSalinity(0.), TopSalinity(0.), + BottomSalFlux(0.), TopSalFlux(0.), NumberOfElements(0) +{ + SetDomainSize(nE); +} + + +/** + * @brief Resizing vectors to match given domain size \n + * @author Nander Wever + * @param nE Domain size (number of elements) + */ +void SalinityTransport::SetDomainSize(size_t nE) { + NumberOfElements = nE; + + flux_up.resize(nE, 0.); + flux_down.resize(nE, 0.); + flux_up_2.resize(nE, 0.); + flux_down_2.resize(nE, 0.); + dz_.resize(nE, 0.); + dz_up.resize(nE, 0.); + dz_down.resize(nE, 0.); + theta1.resize(nE, 0.); + theta2.resize(nE, 0.); + BrineSal.resize(nE, 0.); + D.resize(nE, 0.); + sb.resize(nE, 0.); + return; +} + + +/** + * @brief Solve diffusion-advection equation using the Crank-Nicolson implicit, or fully implicit method\n + * @author Nander Wever + * @details This function solves the following equation (n and i denoting time and spatial level, respectively): +\f[ +\begin{multlined} +\frac{ \left ( \theta^{n+1}_i S_{\mathrm{b}, i}^{n+1} - \theta^{n}_i S_{\mathrm{b}, i}^{n} \right ) } { \Delta t } \\ +- f \left [ \left ( \frac{ 2 D_{i+1}^{n} \theta^{n+1}_{i+1} S_{\mathrm{b}, i+1}^{n+1} }{ \Delta z_{\mathrm{up}} \left ( \Delta z_{\mathrm{up}} + \Delta z_{\mathrm{down}} \right ) } - \frac{ 2 D_{i}^{n} \theta^{n+1}_{i} S_{\mathrm{b}, i}^{n+1} }{\left ( \Delta z_{\mathrm{up}} \Delta z_{\mathrm{down}} \right ) } + \frac{ D_{i-1}^{n} \theta^{n+1}_{i-1} S_{\mathrm{b}, i-1}^{n+1} }{ \Delta z_{\mathrm{down}} \left ( \Delta z_{\mathrm{up}} + \Delta z_{\mathrm{down}} \right ) } \right ) \right ] \\ +- \left ( 1-f \right ) \left [ \left ( \frac{ 2 D_{i+1}^{n} \theta^{n}_{i+1} S_{\mathrm{b}, i+1}^{n} }{ \Delta z_{\mathrm{up}} \left ( \Delta z_{\mathrm{up}} + \Delta z_{\mathrm{down}} \right ) } - \frac{ 2 D_{i}^{n} \theta^{n}_{i} S_{\mathrm{b}, i}^{n} }{\left ( \Delta z_{\mathrm{up}} \Delta z_{\mathrm{down}} \right ) } + \frac{ D_{i-1}^{n} \theta^{n}_{i-1} S_{\mathrm{b}, i-1}^{n} }{ \Delta 
z_{\mathrm{down}} \left ( \Delta z_{\mathrm{up}} + \Delta z_{\mathrm{down}} \right ) } \right ) \right ] \\ +- f \left [ \left ( \frac{q^{n}_{i+1} S_{\mathrm{b},i+1}^{n+1} - q^{n}_{i-1} S_{\mathrm{b},i-1}^{n+1}}{\left ( \Delta z_{\mathrm{up}} + \Delta z_{\mathrm{down}} \right ) } \right ) \right ] - \left ( 1-f \right ) \left [ \left ( \frac{q^{n}_{i+1} S_{\mathrm{b},i+1}^{n} - q^{n}_{i-1} S_{\mathrm{b},i-1}^{n}}{\left ( \Delta z_{\mathrm{up}} + \Delta z_{\mathrm{down}} \right ) } \right ) \right ] - s_{\mathrm{sb}} = 0 +\end{multlined} +\f] +Here, \f$f=1\f$ results in the fully implicit scheme, whereas \f$f=0.5\f$ corresponds to the Crank-Nicolson scheme. The implicit scheme is first order accurate, whereas the Crank-Nicolson scheme is second order accurate. Furthermore, both are unconditionally stable and suffer only minimal numerical diffusion for the advection part. As with many other common schemes, the advection part is not perfectly conserving sharp transitions. Futhermore, the reason to not use the fully implicit or the Crank Nicolson scheme is the occurrence of spurious oscillations in the solution, which negatively impact the accuracy of the simulations more than the negative effect on computational efficiency imposed by the CFL criterion required for the explicit method (see SalinityTransport::SolveSalinityTransportEquationExcplicit). + * @param dt Time step (s) + * @param DeltaSal Result vector (change in salinity over time step) + * @param f Set to 0.5 for Crank-Nicolson, or to 1.0 for fully implicit + * @param DonorCell If true, use mass-conserving donor-cell scheme (upwind). 
If false, use default implicit discretization + * @return false on error, true otherwise + */ +bool SalinityTransport::SolveSalinityTransportEquationImplicit(const double dt, std::vector &DeltaSal, const double f, const bool DonorCell) { + + if(NumberOfElements==0) return false; // Nothing to do + + const bool WriteDebugOutput = false; + const bool UpstreamBoundaries = true; + if(WriteDebugOutput) setvbuf(stdout, NULL, _IONBF, 0); + + // Declare and initialize l.h.s. matrix and r.h.s. vector + std::vector ad(NumberOfElements, 0.); // Matrix diagonal + std::vector adu(NumberOfElements-1, 0.); // Matrix upper diagonal + std::vector adl(NumberOfElements-1, 0.); // Matrix lower diagonal + std::vector b(NumberOfElements, 0.); // Vector + + if((ZeroFluxUpperBoundary_advection_in && flux_up[NumberOfElements-1] > 0.) + || (ZeroFluxUpperBoundary_advection_out && flux_up[NumberOfElements-1] < 0.)) flux_up[NumberOfElements-1] = 0.; + if(ZeroFluxLowerBoundary_advection) flux_down[0] = 0.; + + // Fill matrix and r.h.s. vector + for(size_t i = 0; i < NumberOfElements; i++) { + + // The matrix diagonal, the time derivative: + ad[i] += theta2[i] / dt; + + + // The matrix diagonal, the diffusion part: + if(ZeroFluxLowerBoundary_diffusion && i==0) { + ad[i] += f * (2. * D[i] * theta2[i]) / (dz_up[i] * (dz_up[i] + dz_down[i])); + } else if(ZeroFluxUpperBoundary_diffusion && i==NumberOfElements-1) { + ad[i] += f * (2. * D[i] * theta2[i]) / (dz_down[i] * (dz_up[i] + dz_down[i])); + } else { + ad[i] += f * (2. * D[i] * theta2[i]) / (dz_up[i] * dz_down[i]); + } + + + // The lower diagonal + if(i==0) { + // the diffusion part: + if(ZeroFluxLowerBoundary_diffusion) { + // ZeroFluxLowerBoundary_diffusion: no contribution from below + } else { + // The diffusion term from below is added to the r.h.s. + } + + // the advection part from below is a constant flux and is added to the r.h.s., except when UpstreamBoundaries are used + if (UpstreamBoundaries && flux_down[i] > 0.) 
{ + ad[i] += f * flux_down[i] / (dz_up[i] + dz_down[i]); + } + } else if(i==NumberOfElements-1) { + // the diffusion part: + if(NumberOfElements>1) adl[i-1] += -f * 2. * D[i-1] * theta2[i-1] / (dz_down[i] * (dz_up[i] + dz_down[i])); + + // the advection part: + if(!DonorCell || flux_down[i] < 0.) { + adl[i-1] += f * flux_down[i] / (dz_up[i] + dz_down[i]) / ((DonorCell) ? (0.5) : (1.)); + } else { + ad[i] += f * flux_down[i] / (dz_up[i] + dz_down[i]) / 0.5; + } + } else { + // the diffusion part: + adl[i-1] += -f * 2. * D[i-1] * theta2[i-1] / (dz_down[i] * (dz_up[i] + dz_down[i])); + + // the advection part: + if(!DonorCell || flux_down[i] < 0.) { + adl[i-1] += f * flux_down[i] / (dz_up[i] + dz_down[i]) / ((DonorCell) ? (0.5) : (1.)); + } else { + ad[i] += f * flux_down[i] / (dz_up[i] + dz_down[i]) / 0.5; + } + } + + + // The upper diagonal + if(i==0) { + // the diffusion part: + if(NumberOfElements>1) adu[i] += -f * 2. * D[i+1] * theta2[i+1] / (dz_up[i] * (dz_up[i] + dz_down[i])); + + // the advection part: + if(!DonorCell || flux_up[i] > 0.) { + adu[i] += -f * flux_up[i] / (dz_up[i] + dz_down[i]) / ((DonorCell) ? (0.5) : (1.)); + } else { + ad[i] += -f * flux_up[i] / (dz_up[i] + dz_down[i]) / 0.5; + } + } else if(i==NumberOfElements-1) { + if(ZeroFluxUpperBoundary_diffusion) { + // ZeroFluxUpperBoundary: no contribution from above + } else { + // The diffusion term from above is added to the r.h.s. + } + + // the advection part from above is a constant flux and is added to the r.h.s. + if (UpstreamBoundaries && flux_up[i] < 0.) { + ad[i] += f * flux_up[i] / (dz_up[i] + dz_down[i]); + } + } else { + // the diffusion part: + adu[i] += -f * 2. * D[i+1] * theta2[i+1] / (dz_up[i] * (dz_up[i] + dz_down[i])); + + // the advection part: + if(!DonorCell || flux_up[i] > 0.) { + adu[i] += -f * flux_up[i] / (dz_up[i] + dz_down[i]) / ((DonorCell) ? (0.5) : (1.)); + } else { + ad[i] += -f * flux_up[i] / (dz_up[i] + dz_down[i]) / 0.5; + } + } + + + // The r.h.s. 
vector time derivative: + b[i] += (theta1[i] * BrineSal[i]) / dt; + + + // The r.h.s. vector diffusion part: + if(i==NumberOfElements-1) { + // No flux upper boundary for diffusion (mirroring the i-1 node) + if(ZeroFluxUpperBoundary_diffusion) { + if(NumberOfElements>1) { + b[i] += - (1. - f) * (2. * D[i] * theta1[i] * BrineSal[i]) / (dz_down[i] * (dz_up[i] + dz_down[i])) + + (1. - f) * (2. * D[i-1] * theta1[i-1] * BrineSal[i-1]) / (dz_down[i] * (dz_up[i] + dz_down[i])); + } + } else { + b[i] += (1. - f) * (2. * D[i-1] * theta1[i-1] * BrineSal[i-1]) / (dz_down[i] * (dz_up[i] + dz_down[i])) + - (1. - f) * (2. * D[i] * theta1[i] * BrineSal[i]) / (dz_up[i] * dz_down[i]) + + (1. - f) * (2. * D[i] * theta1[i] * TopSalinity) / (dz_up[i] * (dz_up[i] + dz_down[i])); + } + } else if(i==0) { + // No flux lower boundary for diffusion (mirroring the i+1 node) + if(ZeroFluxLowerBoundary_diffusion) { + if(NumberOfElements>1) { + b[i] += - (1. - f) * (2. * D[i] * theta1[i] * BrineSal[i]) / (dz_up[i] * (dz_up[i] + dz_down[i])) + + (1. - f) * (2. * D[i+1] * theta1[i+1] * BrineSal[i+1]) / (dz_up[i] * (dz_up[i] + dz_down[i])); + } + } else { + b[i] += (1. - f) * 2. * D[i] * theta1[i] * BottomSalinity / (dz_down[i] * (dz_up[i] + dz_down[i])) + - (1. - f) * (2. * D[i] * theta1[i] * BrineSal[i]) / (dz_up[i] * dz_down[i]) + + (1. - f) * (2. * D[i+1] * theta1[i+1] * BrineSal[i+1]) / (dz_up[i] * (dz_up[i] + dz_down[i])); + } + } else { + b[i] += (1. - f) * (2. * D[i-1] * theta1[i-1] * BrineSal[i-1]) / (dz_down[i] * (dz_up[i] + dz_down[i])) + - (1. - f) * (2. * D[i] * theta1[i] * BrineSal[i]) / (dz_up[i] * dz_down[i]) + + (1. - f) * (2. * D[i+1] * theta1[i+1] * BrineSal[i+1]) / (dz_up[i] * (dz_up[i] + dz_down[i])); + } + + + //The r.h.s. vector advection part: + if(i==0 && i==NumberOfElements-1) { + // TODO: What to do in the case of only 1 element?? + std::cerr << "Only one snow/ice element present, which is not implemented.\n"; + throw; + } else if (i==0) { + b[i] += (1. 
- f) * (flux_up[i] * ((!DonorCell || flux_up[i]>0.) ? (BrineSal[i+1]) : (BrineSal[i])) - flux_down[i] * (((!DonorCell && !UpstreamBoundaries) || flux_down[i]<0.) ? (BottomSalinity) : (BrineSal[i]))) / (dz_up[i] + dz_down[i]) / ((DonorCell) ? (0.5) : (1.)); + } else if (i==NumberOfElements-1) { + b[i] += (1. - f) * (flux_up[i] * (((!DonorCell && !UpstreamBoundaries) || flux_up[i]>0.) ? (TopSalinity) : (BrineSal[i])) - flux_down[i] * ((!DonorCell || flux_down[i]<0.) ? (BrineSal[i-1]) : (BrineSal[i]))) / (dz_up[i] + dz_down[i]) / ((DonorCell) ? (0.5) : (1.)); + } else { + b[i] += (1. - f) * (flux_up[i] * ((!DonorCell || flux_up[i]>0.) ? (BrineSal[i+1]) : (BrineSal[i])) - flux_down[i] * ((!DonorCell || flux_down[i]<0.) ? (BrineSal[i-1]) : (BrineSal[i]))) / (dz_up[i] + dz_down[i]) / ((DonorCell) ? (0.5) : (1.)); + } + + + // The r.h.s. vector source/sink term: + b[i] += -sb[i]; + } + + + // Deal with boundary conditions: + + // Add the terms from "out of boundary" diffusion + if(!ZeroFluxLowerBoundary_diffusion) b[0] += f * (2. * D[0] * theta2[0] * BottomSalinity) / (dz_down[0] * (dz_up[0] + dz_down[0])); + if(!ZeroFluxUpperBoundary_diffusion) b[NumberOfElements-1] += f * (2. * D[NumberOfElements-1] * theta2[NumberOfElements-1] * TopSalinity) / (dz_up[NumberOfElements-1] * (dz_up[NumberOfElements-1] + dz_down[NumberOfElements-1])); + + + // Add the terms from "out of boundary" advection + if(flux_down[0]<0 || !UpstreamBoundaries) b[0] += -f * (flux_down[0] * ((!DonorCell || flux_down[0]<0.) ? (BottomSalinity) : (0.*BrineSal[0]))) / (dz_up[0] + dz_down[0]) / ((DonorCell) ? (0.5) : (1.)); + if(flux_up[NumberOfElements-1]>0 || !UpstreamBoundaries) b[NumberOfElements-1] += f * (flux_up[NumberOfElements-1] * ((!DonorCell || flux_up[NumberOfElements-1]>0.) ? (TopSalinity) : (0.*BrineSal[NumberOfElements-1]))) / (dz_up[NumberOfElements-1] + dz_down[NumberOfElements-1]) / ((DonorCell) ? 
(0.5) : (1.)); + + + // Dump solver info on stdout + if(WriteDebugOutput) { + std::cout << "SalinityTransport.cc > Solver:\n"; + std::cout << " dt = " << dt << "\n"; + std::cout << "SalinityTransport.cc > Coefficients:\n"; + for(size_t i = 0; i < NumberOfElements; i++) { + if(i==NumberOfElements-1) { + std::cout << i << ": " << std::scientific << D[i] << " " << flux_up[i] << " " << flux_down[i] << " " << flux_up_2[i] << " " << flux_down_2[i] << " " << sb[i] << " " << theta1[i] << " " << theta2[i] << " " << BrineSal[i] << " " << ad[i] << " " << "---" << " " << "---" << " " << b[i] << "\n"; + } else { + std::cout << i << ": " << std::scientific << D[i] << " " << flux_up[i] << " " << flux_down[i] << " " << flux_up_2[i] << " " << flux_down_2[i] << " " << sb[i] << " " << theta1[i] << " " << theta2[i] << " " << " " << BrineSal[i] << " " << ad[i] << " " << adl[i] << " " << adu[i] << " " << b[i] << "\n"; + } + } + } + + + // Track boundary fluxes (time step t) + BottomSalFlux += -(1. - f) * (flux_down[0] * dt * ((flux_down[0] > 0.) ? (BrineSal[0] * (Constants::density_water + SeaIce::betaS * BrineSal[0])) : (BottomSalinity * (Constants::density_water + SeaIce::betaS * BottomSalinity)))); + TopSalFlux += (1. - f) * (flux_up[NumberOfElements-1] * dt * ((flux_up[NumberOfElements-1] > 0.) ? (TopSalinity * (Constants::density_water + SeaIce::betaS * TopSalinity)) : (BrineSal[NumberOfElements-1] * (Constants::density_water + SeaIce::betaS * BrineSal[NumberOfElements-1])))); + + + // Call solver + const int matrixdimensions=int(NumberOfElements); // Cast from size_t to int is necessary, to interface correctly with LAPACK dgtsv_. +#ifdef CLAPACK + // Call LAPACK DGTSV: Solver for tridiagonal matrices, with partial pivoting. 
+ int info=0; + const int vectordimensions=1; + dgtsv_( (integer*) &matrixdimensions, (integer*) &vectordimensions, &adl[0], &ad[0], &adu[0], &b[0], (integer*) &matrixdimensions, (integer*) &info ); + + if(info!=0) { + //= 0: successful exit + //< 0: if INFO = -i, the i-th argument had an illegal value + //> 0: if INFO = i, U(i,i) is exactly zero, and the solution + // has not been computed. The factorization has not been + // completed unless i = N. + std::cout << "[E] Error in SalinityTransport.cc: DGTSV failed [info = " << info << "].\n"; + return false; + } +#else + // Call TDMASolver: Thomas algorithm for tidiagonal matrices. Not the recommended choice, but useful when LAPACK is not available. + std::vector b_ = b; + const int ret = ReSolver1d::TDMASolver(matrixdimensions, &adl[0], &ad[0], &adu[0], &b[0], &b_[0]); + b=b_; + if (ret != 0) { + std::cout << "[E] Error in SalinityTransport.cc: TDMA failed.\n"; + std::cout << " Using LAPACK (see compile options) may increase numerical stability in SalinityTransport.\n"; + return false; + } +#endif + + + // Apply solution + if(WriteDebugOutput) std::cout << "SalinityTransport.cc > Solution vector:\n"; + for(size_t i=0; i 0.) ? (BrineSal[0] * (Constants::density_water + SeaIce::betaS * BrineSal[0])) : (BottomSalinity * (Constants::density_water + SeaIce::betaS * BottomSalinity)))); + TopSalFlux += f * (flux_up[NumberOfElements-1] * dt * ((flux_up[NumberOfElements-1] > 0.) ? 
(TopSalinity * (Constants::density_water + SeaIce::betaS * TopSalinity)) : (BrineSal[NumberOfElements-1] * (Constants::density_water + SeaIce::betaS * BrineSal[NumberOfElements-1])))); + + + return true; +} + + +/** + * @brief Solve diffusion-advection equation using the upwind explicit method\n + * @author Nander Wever + * @param dt Time step (s) + * @param DeltaSal Result vector (change in salinity over time step) + * @return false on error, true otherwise + */ +bool SalinityTransport::SolveSalinityTransportEquationExplicit(const double dt, std::vector &DeltaSal) { + + if(NumberOfElements==0) return false; // Nothing to do + + const bool WriteDebugOutput = false; + if(WriteDebugOutput) setvbuf(stdout, NULL, _IONBF, 0); + + // Declare vectors + std::vector b(NumberOfElements, 0.); // Solution vector + + if((ZeroFluxUpperBoundary_advection_in && flux_up[NumberOfElements-1] > 0.) + || (ZeroFluxUpperBoundary_advection_out && flux_up[NumberOfElements-1] < 0.)) flux_up[NumberOfElements-1] = 0.; + if(ZeroFluxLowerBoundary_advection) flux_down[0] = 0.; + + // Fill matrix and r.h.s. vector + for(size_t i = 0; i < NumberOfElements; i++) { + b[i] += (theta1[i] * BrineSal[i]); + + // Explicit upwind scheme for advection: + const double tmp_flux = (flux_up[i] * dz_up[i] + flux_down[i] * dz_down[i]) / (dz_up[i] + dz_down[i]); + const double tmp_flux_2 = (flux_up_2[i] * dz_up[i] + flux_down_2[i] * dz_down[i]) / (dz_up[i] + dz_down[i]); + + // First advection term +// b[i] += BrineSal[i] * (flux_up[i] * dt - flux_down[i] * dt) / dz_[i] + +// tmp_flux * dt * ( (tmp_flux > 0.) ? ((((i==NumberOfElements-1) ? (TopSalinity) : (BrineSal[i+1])) - BrineSal[i]) / dz_up[i]) : (((BrineSal[i] - ((i==0) ? (BottomSalinity) : (BrineSal[i-1]))) / dz_down[i])) ); + b[i] += BrineSal[i] * (flux_up[i] * dt - flux_down[i] * dt) / dz_[i] + + flux_up[i] * dt * ( (flux_up[i]>0.) ? ((i==NumberOfElements-1) ? 
(TopSalinity) : (BrineSal[i+1])) : (- BrineSal[i]) ) /*/ dz_up[i]*/ + + flux_down[i] * dt * ( (flux_down[i]>0.) ? (-BrineSal[i]) : ((i==0) ? (-BottomSalinity) : (-BrineSal[i-1])) ) /*/ dz_down[i]*/; + + // Second advection term + /*b[i] += BrineSal[i] * (flux_up_2[i] * dt - flux_down_2[i] * dt) / dz_[i] + + tmp_flux_2 * dt * ( (tmp_flux_2 > 0.) ? ((((i==NumberOfElements-1) ? (TopSalinity) : (BrineSal[i+1])) - BrineSal[i]) / dz_up[i]) : (((BrineSal[i] - ((i==0) ? (BottomSalinity) : (BrineSal[i-1]))) / dz_down[i])) );*/ + + // Explicit scheme for diffusion, note that we force a zero flux condition at the top boundary (mirroring the cell i-1) + //b[i] += dt * ( ((i==NumberOfElements-1) ? (2. * D[i-1] * theta1[i-1] * BrineSal[i-1]) : (2. * theta1[i+1] * D[i+1] * BrineSal[i+1])) / (dz_up[i]*(dz_up[i]+dz_down[i])) - (2. * theta1[i] * D[i] * BrineSal[i]) / (dz_up[i]*dz_down[i]) + (((i==0) ? (2. * D[i] * theta1[i] * BottomSalinity) : (2. * theta1[i-1] * D[i-1] * BrineSal[i-1]))) / (dz_down[i]*(dz_up[i]+dz_down[i])) ); + if (i == 0) { + b[i] += dt * (2. * (theta1[i+1] * D[i+1] * BrineSal[i+1] - theta1[i] * D[i] * BrineSal[i])) / (dz_up[i]*(dz_up[i]+dz_down[i])); + } else if (i == NumberOfElements-1) { + //b[i] += dt * (2. * (theta1[i-1] * D[i-1] * BrineSal[i-1] - theta1[i] * D[i] * BrineSal[i])) / (dz_down[i]*(dz_up[i]+dz_down[i])); + } else { + b[i] += dt * ((2. * theta1[i+1] * D[i+1] * BrineSal[i+1]) / (dz_up[i]*(dz_up[i]+dz_down[i])) - ((2. * theta1[i] * D[i] * BrineSal[i]) / (dz_up[i]*dz_down[i])) + ((2. * theta1[i-1] * D[i-1] * BrineSal[i-1]) / (dz_down[i]*(dz_up[i]+dz_down[i]))) ); + } + + + // Source/sink term + b[i] += -sb[i]; + + b[i] /= theta2[i]; + + // Track boundary fluxes + if(i==0) { + // Lower boundary advection + BottomSalFlux += -BrineSal[i] * flux_down[i] * dt - BrineSal[i] * flux_down_2[i] * dt + + flux_down[i] * dt * ( /*(tmp_flux > 0.) ? (0.) :*/ ((BrineSal[i] - BottomSalinity) / dz_down[i]) ) + + flux_down_2[i] * dt * ( /*(tmp_flux_2 > 0.) ? (0.) 
: */ ((BrineSal[i] - BottomSalinity) / dz_down[i]) ); + // Lower boundary diffusion + BottomSalFlux += dt * (-D[i] * theta1[i] * BrineSal[i] + D[i] * theta1[i] * BottomSalinity) / (dz_down[i]); + + } + if(i==NumberOfElements-1) { + // Upper boundary advection + TopSalFlux = BrineSal[i] * flux_up[i] * dt - BrineSal[i] * flux_up_2[i] * dt + + tmp_flux * dt * ( (tmp_flux > 0.) ? ((TopSalinity - BrineSal[i]) / dz_down[i]) : (0.) ) + + tmp_flux_2 * dt * ( (tmp_flux_2 > 0.) ? ((TopSalinity - BrineSal[i]) / dz_down[i]) : (0.) ); + // Upper boundary diffusion + TopSalFlux += dt * (-D[i] * theta1[i] * BrineSal[i] + D[i] * theta1[i] * TopSalinity) / (dz_up[i]); + } + } + + // Apply solution + for(size_t i=0; i CFL_limit) {printf("FAIL1@%d ", int(i)); return false;} +// if (tmp_flux_2 * dt / std::min(dz_up[i], dz_down[i]) > CFL_limit) {printf("FAIL2@%d ", int(i)); return false;} + if (std::max( fabs(flux_up[i]) , fabs(flux_down[i]) ) * (dt / (std::min(dz_up[i], dz_down[i]))) > CFL_limit) {printf("Criterion 1 @%d ", int(i)); return false;} + if (std::max( fabs(flux_up_2[i]) , fabs(flux_down_2[i]) ) * (dt / (std::min(dz_up[i], dz_down[i]))) > CFL_limit) {printf("Criterion 2 @%d ", int(i)); return false;} + // Check CFL for diffusion + if (D[i] * theta1[i] * dt / (std::min(dz_up[i], dz_down[i]) * std::min(dz_up[i], dz_down[i])) > CFL_limit) {printf("Criterion 3 @%d ", int(i)); return false;} + } + return true; +} + + +/** + * @brief Check for Implicit criterion\n + * @author Nander Wever + * @param dt Time step (s) + * @return true when provided time step dt satisfies criterion to reduce spurious oscillations, false otherwise. 
+ */ +bool SalinityTransport::VerifyImplicitDt(const double dt) +{ + const double limit = 0.999; + for(size_t i = 0; i < NumberOfElements; i++) { + // Check advection + if (std::max( fabs(flux_up[i])/dz_up[i] , fabs(flux_down[i]/dz_down[i]) ) * dt > limit) {printf("Criterion 4 @%d ", int(i)); return false;} + if (std::max( fabs(flux_up_2[i])/dz_up[i] , fabs(flux_down_2[i]/dz_down[i]) ) * dt > limit) {printf("Criterion 5 @%d ", int(i)); return false;} + // Check diffusion + if (i!=0 && D[i] * theta1[i] * dt / (std::min(dz_up[i], dz_down[i]) * std::min(dz_up[i], dz_down[i])) > limit) {printf("FAIL5@%d ", int(i)); return false;} + if (i==0 && D[i] * dt / (std::min(dz_up[i], dz_down[i]) * std::min(dz_up[i], dz_down[i])) > limit) {printf("Criterion 6 @%d ", int(i)); return false;} + } + return true; +} diff --git a/third_party/snowpack/snowpackCore/SalinityTransport.h b/third_party/snowpack/snowpackCore/SalinityTransport.h new file mode 100644 index 00000000..404abffa --- /dev/null +++ b/third_party/snowpack/snowpackCore/SalinityTransport.h @@ -0,0 +1,66 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND +*/ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . 
+*/ +/** + * @file SalinityTransport.h + */ + +#ifndef SALINITYTRANSPORT_H +#define SALINITYTRANSPORT_H + +#include + +/** + * @class SalinityTransport + * @author Nander Wever + * @brief This module contains the solver for the diffusion-advection equation for the transport of salinity + */ +class SalinityTransport { + + public: + SalinityTransport(size_t nE); // Class constructor + + bool VerifyCFL(const double dt); + bool VerifyImplicitDt(const double dt); + bool SolveSalinityTransportEquationImplicit(const double dt, std::vector & DeltaSal, const double f, const bool DonorCell = true); // Donor cell or central differences? + bool SolveSalinityTransportEquationExplicit(const double dt, std::vector & DeltaSal); + enum SalinityTransportSolvers{EXPLICIT, IMPLICIT, IMPLICIT2}; + + std::vector flux_up; //Flux with element above (negative=upward, positive=downward) + std::vector flux_down; //Flux with element below (negative=upward, positive=downward) + std::vector flux_up_2; //Flux with element above (negative=upward, positive=downward) + std::vector flux_down_2; //Flux with element below (negative=upward, positive=downward) + std::vector dz_; //Grid cell size + std::vector dz_up; //Grid cell distance above + std::vector dz_down; //Grid cell distance below + std::vector theta1; //Vol. liquid water content (m^3/m^3), at t=n + std::vector theta2; //Vol. liquid water content (m^3/m^3), at t=n+1 + std::vector BrineSal; //Salinity in brine, in g/(m^3_water) + std::vector D; //Diffusivity + std::vector sb; //Source/sink term for brine salinity + + double BottomSalinity, TopSalinity; //The boundary conditions bottom and top salinities. 
+ + double BottomSalFlux, TopSalFlux; //Bottom and top salt flux + + private: + void SetDomainSize(size_t nE); + size_t NumberOfElements; +}; +#endif //End of SalinityTransport.h diff --git a/third_party/snowpack/snowpackCore/SeaIce.cc b/third_party/snowpack/snowpackCore/SeaIce.cc new file mode 100644 index 00000000..cbc3614b --- /dev/null +++ b/third_party/snowpack/snowpackCore/SeaIce.cc @@ -0,0 +1,742 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND +*/ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . 
+*/ +/** + * @file SeaIce.cc + * @version 23.09 + * @bug - + * @brief This module contains the sea ice specific routines for the 1d snowpack model + */ + +#include "../Constants.h" +#include "../Utils.h" +#include "Metamorphism.h" +#include "SeaIce.h" +#include "ReSolver1d.h" + +#include +#include +#include + +using namespace mio; +using namespace std; + +/** + * @page sea_ice Sea Ice + * + */ + +/************************************************************ + * static section * + ************************************************************/ + +//Threshold that defines ice +const double SeaIce::SeaWaterFreezingTemp = IOUtils::C_TO_K(-1.95); +const double SeaIce::SeaIceDensity = ReSolver1d::max_theta_ice * Constants::density_ice; +const double SeaIce::ice_threshold = 800.; +const double SeaIce::mu = 0.054; // Freezing point coefficient +const double SeaIce::betaS = 0.824; // Density coefficient (see: Appendix A Sea Water Density According to UNESCO Formula, https://link.springer.com/content/pdf/bbm%3A978-3-319-18908-6%2F1.pdf) +const double SeaIce::ThicknessFirstIceLayer = 0.01; +const double SeaIce::InitRg = 5.; +const double SeaIce::InitRb = 2.5; +const double SeaIce::OceanSalinity = 35.; +const double SeaIce::InitSeaIceSalinity = 5.; +const double SeaIce::InitSnowSalinity = 0.; + + +/************************************************************ + * non-static section * + ************************************************************/ + +SeaIce::SeaIce(): + SeaLevel(0.), ForcedSeaLevel(IOUtils::nodata), FreeBoard (0.), IceSurface(0.), IceSurfaceNode(0), OceanHeatFlux(0.), BottomSalFlux(0.), TopSalFlux(0.), check_initial_conditions(false), salinityprofile(SINUSSAL) {} + +SeaIce& SeaIce::operator=(const SeaIce& source) { + if(this != &source) { + SeaLevel = source.SeaLevel; + ForcedSeaLevel = source.ForcedSeaLevel; + FreeBoard = source.FreeBoard; + IceSurface = source.IceSurface; + IceSurfaceNode = source.IceSurfaceNode; + OceanHeatFlux = source.OceanHeatFlux; + } + 
return *this; +} + +SeaIce::~SeaIce() {} + +void SeaIce::ConfigSeaIce(const SnowpackConfig& i_cfg) { + // Read salinity profile + std::string tmp_salinityprofile; + i_cfg.getValue("SALINITYPROFILE", "SnowpackSeaice", tmp_salinityprofile, mio::IOUtils::nothrow); + if (tmp_salinityprofile=="NONE") { + salinityprofile=NONE; + } else if (tmp_salinityprofile=="CONSTANT") { + salinityprofile=CONSTANT; + } else if (tmp_salinityprofile=="COXANDWEEKS") { + salinityprofile=COXANDWEEKS; + } else if (tmp_salinityprofile=="LINEARSAL") { + salinityprofile=LINEARSAL; + } else if (tmp_salinityprofile=="LINEARSAL2") { + salinityprofile=LINEARSAL2; + } else if (tmp_salinityprofile=="SINUSSAL") { + salinityprofile=SINUSSAL; + } else { + prn_msg( __FILE__, __LINE__, "err", Date(), "Unknown salinity profile (key: SALINITYPROFILE)."); + throw; + } + + // Read whether or not to check the initial conditions + i_cfg.getValue("CHECK_INITIAL_CONDITIONS", "SnowpackSeaice", check_initial_conditions, mio::IOUtils::nothrow); + return; +} + +std::iostream& operator<<(std::iostream& os, const SeaIce& data) +{ + os.write(reinterpret_cast(&data.FreeBoard), sizeof(data.FreeBoard)); + os.write(reinterpret_cast(&data.IceSurface), sizeof(data.IceSurface)); + os.write(reinterpret_cast(&data.IceSurfaceNode), sizeof(data.IceSurfaceNode)); + os.write(reinterpret_cast(&data.OceanHeatFlux), sizeof(data.OceanHeatFlux)); + return os; +} + +std::iostream& operator>>(std::iostream& is, SeaIce& data) +{ + is.read(reinterpret_cast(&data.FreeBoard), sizeof(data.FreeBoard)); + is.read(reinterpret_cast(&data.IceSurface), sizeof(data.IceSurface)); + is.read(reinterpret_cast(&data.IceSurfaceNode), sizeof(data.IceSurfaceNode)); + is.read(reinterpret_cast(&data.OceanHeatFlux), sizeof(data.OceanHeatFlux)); + return is; +} + +/** + * @brief Determines the salinity and associated melting temperature + * @param Xdata SnowStation to apply the salinity profile to + */ +void SeaIce::compSalinityProfile(SnowStation& Xdata) +{ + 
const size_t nE = Xdata.getNumberOfElements(); + findIceSurface(Xdata); + + switch ( salinityprofile ) { + + case NONE: + { + break; + } + + case CONSTANT: + { + for (size_t e = Xdata.SoilNode; e < nE; e++) { + Xdata.Edata[e].salinity = 35.; // Default: 35 g/kg + Xdata.Edata[e].updDensity(); + calculateMeltingTemperature(Xdata.Edata[e]); + } + /*size_t e = Xdata.SoilNode; + for (; e < IceSurfaceNode; e++) { + Xdata.Edata[e].salinity = 35.; // Default: 35 g/kg + Xdata.Edata[e].updDensity(); + calculateMeltingTemperature(Xdata.Edata[e]); + } + for (; e < nE; e++) { + Xdata.Edata[e].updDensity(); + calculateMeltingTemperature(Xdata.Edata[e]); + }*/ + break; + } + + case COXANDWEEKS: + { + for (size_t e = Xdata.SoilNode; e < nE; e++) { + if(Xdata.Ndata[e].z >= findIceSurface(Xdata)) { + // For snow + Xdata.Edata[e].salinity = 1.; + } else { + // For ice + if(Xdata.Ndata[e].z < 0.4) { + Xdata.Edata[e].salinity = 14.24 - 19.39 * Xdata.Ndata[e].z; + } else { + Xdata.Edata[e].salinity = 7.88 - 1.59 * Xdata.Ndata[e].z; + } + } + Xdata.Edata[e].updDensity(); + calculateMeltingTemperature(Xdata.Edata[e]); + } + break; + } + + case LINEARSAL: + { + const double topSal = 1.; + const double botSal = 5.; + for (size_t e = Xdata.SoilNode; e < nE; e++) { + Xdata.Edata[e].salinity = ((topSal - botSal) / (Xdata.Ndata[IceSurfaceNode].z - Xdata.Ndata[0].z)) * 0.5 * (Xdata.Ndata[e].z + Xdata.Ndata[e+1].z); // linear gradient between 1 psu (top) to 4 psu (bottom) + Xdata.Edata[e].updDensity(); + calculateMeltingTemperature(Xdata.Edata[e]); + } + break; + } + + case LINEARSAL2: + { + const double topSal = 1.; + const double botSal = 5.; + // define salinity in ice + size_t e = Xdata.SoilNode; + for (; e < IceSurfaceNode ; e++) { + Xdata.Edata[e].salinity = ((topSal - botSal) / (Xdata.Ndata[IceSurfaceNode].z - Xdata.Ndata[0].z)) * 0.5 * (Xdata.Ndata[e].z + Xdata.Ndata[e+1].z); // linear gradient between 1 psu (top) to 4 psu (bottom) + Xdata.Edata[e].updDensity(); + 
calculateMeltingTemperature(Xdata.Edata[e]); + } + // define salinity in snow + for (; e < nE ; e++) { + Xdata.Edata[e].salinity = 1; + Xdata.Edata[e].updDensity(); + calculateMeltingTemperature(Xdata.Edata[e]); + } + break; + } + + // C shaped salinity profile + case SINUSSAL: + { + const double topSal = 12.; + const double ampSal = 8.; + const double PI = 3.141592653589793; + // define salinity in ice + size_t e = Xdata.SoilNode; + for (; e < IceSurfaceNode ; e++) { + Xdata.Edata[e].salinity = ampSal* sin((Xdata.Ndata[e].z / (Xdata.Ndata[IceSurfaceNode].z - Xdata.Ndata[0].z))*PI+PI)+topSal; // c shaped salinity profile in sea ice + Xdata.Edata[e].updDensity(); + calculateMeltingTemperature(Xdata.Edata[e]); + } + // define salinity in snow + for (; e < nE ; e++) { + Xdata.Edata[e].salinity = 1; // 8 after Massom et al. 1997 + Xdata.Edata[e].updDensity(); + calculateMeltingTemperature(Xdata.Edata[e]); + } + break; + } + default: + InvalidArgumentException("Unknown salinity profile provided", AT); + + } +} + +/** + * @brief Updates the freeboard variable (i.e., sea level with respect to ice surface)\n + * positive: sea level below ice surface\n + * negative: sea level above ice surface (flooding)\n + * @version 16.08 + * @param Xdata SnowStation object to use in calculation + */ +void SeaIce::updateFreeboard(SnowStation& Xdata) +{ + Xdata.compSnowpackMasses(); + SeaLevel = (ForcedSeaLevel!=IOUtils::nodata) ? 
(ForcedSeaLevel) : (Xdata.swe / (Constants::density_water + SeaIce::betaS * SeaIce::OceanSalinity)); + const double FreeBoard_snow = Xdata.cH - SeaLevel; // This is the freeboard relative to snow surface + FreeBoard = (findIceSurface(Xdata) - (Xdata.cH - FreeBoard_snow)); + return; +} + +/** + * @brief Find snow/ice transition for sea ice simulations\n + * @version 16.08 + * @param Xdata SnowStation object to use in calculation + */ +double SeaIce::findIceSurface(SnowStation& Xdata) +{ + const size_t nE = Xdata.getNumberOfElements(); + + // Now find ice/snow transition + if(nE == 0) { + IceSurface = 0.; + IceSurfaceNode = 0; + return IceSurface; + } + // Deal with the case that the top element is ice + if (Xdata.Edata[nE-1].theta[ICE] * Constants::density_ice > ice_threshold) { + IceSurface = Xdata.Ndata[nE].z; + IceSurfaceNode = nE; + return IceSurface; + } + // Go from top to bottom. Note that ice layers inside the snowpack may fool this simple search. + for (size_t e = nE-1; e-- > 0;) { + if (Xdata.Edata[e].theta[ICE] * Constants::density_ice > ice_threshold && Xdata.Edata[e+1].theta[ICE] * Constants::density_ice < ice_threshold) { + IceSurface = Xdata.Ndata[e+1].z; + IceSurfaceNode = e+1; + return IceSurface; + } + } + IceSurfaceNode = 0; + IceSurface = 0.; + return IceSurface; +} + +/** + * @brief Apply flooding\n + * @version 16.08 + * @param Xdata SnowStation object to use in calculation + * @param Sdata + */ +void SeaIce::compFlooding(SnowStation& Xdata, SurfaceFluxes& Sdata) +{ + size_t iN = 0; + while (iN < Xdata.getNumberOfElements() && Xdata.Ndata[iN].z + 0.5 * Xdata.Edata[iN].L < SeaLevel) { + const double dth_w = std::max(0., Xdata.Edata[iN].theta[AIR] * (Constants::density_ice / Constants::density_water) - Xdata.Edata[iN].theta[WATER] * (Constants::density_water / Constants::density_ice - 1.)); + Xdata.Edata[iN].theta[WATER] += dth_w; + Xdata.Edata[iN].theta[AIR] -= dth_w; + Xdata.Edata[iN].salinity += SeaIce::OceanSalinity * dth_w; + 
Xdata.Edata[iN].salinity = std::min(SeaIce::OceanSalinity, Xdata.Edata[iN].salinity); + Sdata.mass[SurfaceFluxes::MS_FLOODING]-=Xdata.Edata[iN].Rho * Xdata.Edata[iN].L; + Xdata.Edata[iN].updDensity(); + Sdata.mass[SurfaceFluxes::MS_FLOODING]+=Xdata.Edata[iN].Rho * Xdata.Edata[iN].L; + Xdata.Edata[iN].M = Xdata.Edata[iN].Rho * Xdata.Edata[iN].L; + calculateMeltingTemperature(Xdata.Edata[iN]); + iN++; + } + return; +} + + +/** + * @brief Calculate melting temperature as function of brine salinity + * @version 16.08 + * @param Edata Layer element to use in calculation + */ +void SeaIce::calculateMeltingTemperature(ElementData& Edata) +{ + // See: Bitz, C. M., and W. H. Lipscomb (1999), An energy-conserving thermodynamic model of sea ice, J. Geophys. Res., 104(C7), 15669–15677, doi:10.1029/1999JC900100. + // who is citing: Assur, A., Composition of sea ice and its tensile strength, in Arctic Sea Ice, N. A. S. N. R. C. Publ., 598, 106-138, 1958. + Edata.meltfreeze_tk = (Edata.theta[WATER] + Edata.theta[WATER_PREF] > 0.) ? (SeaIce::calculateMeltingTemperature(Edata.salinity / (Edata.theta[WATER] + Edata.theta[WATER_PREF]))) : (Constants::meltfreeze_tk); + return; +} + + +/** + * @brief Calculate melting temperature as function of brine salinity + * @version 17.12: initial version + * @param Sal: Brine salinity (PSU, which is g/kg) + */ +double SeaIce::calculateMeltingTemperature(const double& Sal) +{ + // See: Bitz, C. M., and W. H. Lipscomb (1999), An energy-conserving thermodynamic model of sea ice, J. Geophys. Res., 104(C7), 15669–15677, doi:10.1029/1999JC900100. + // who is citing: Assur, A., Composition of sea ice and its tensile strength, in Arctic Sea Ice, N. A. S. N. R. C. Publ., 598, 106-138, 1958. + return IOUtils::C_TO_K(-SeaIce::mu * Sal); +} + + +/** + * @brief Heat capacity of sea ice, for the combined system ICE + WATER (brine). 
+ * @version 16.08: initial version
+ * @param T: Temperature (K)
+ * @param Sal: Salinity (PSU, which is g/kg)
+ * @return Heat capacity for sea ice (J / kg / K)
+ */
+double SeaIce::compSeaIceHeatCapacity(const double& T, const double& Sal)
+{
+	// From: Bitz, C. M., and W. H. Lipscomb (1999), An energy-conserving thermodynamic model of sea ice, J. Geophys. Res., 104(C7), 15669–15677, doi:10.1029/1999JC900100.
+	// See Eq. 1 and 2
+	const double L0 = Constants::lh_fusion;
+	const double c0 = Constants::specific_heat_ice;
+	// Effective heat capacity: pure-ice capacity plus the brine-pocket term,
+	// which diverges as T approaches the freezing point (T*T in denominator).
+	return c0 + (SeaIce::mu * L0 * Sal) / (T * T);
+}
+
+
+/**
+ * @brief Heat conduction in sea ice, for the combined system ICE + WATER (brine)
+ * @version 16.08: initial version
+ * @param Edata Element data; only salinity and element temperature Te are read
+ * @return Thermal conductivity for sea ice (W K-1 m-1)
+ */
+double SeaIce::compSeaIceThermalConductivity(const ElementData& Edata)
+{
+	// From: Bitz, C. M., and W. H. Lipscomb (1999), An energy-conserving thermodynamic model of sea ice, J. Geophys. Res., 104(C7), 15669–15677, doi:10.1029/1999JC900100.
+	// See Eq. 9
+	const double beta = 0.1172;	// W/m^2/permille
+	const double k0 = 2.034;	// W/m/K, note that this is the thermal conductivity of fresh ice, and it may be coupled to the value in Constants.h
+	// Note the conversion from kg/kg to permille for salinity
+	return (k0 + ((beta * Edata.salinity) / Edata.Te));
+}
+
+
+/**
+ * @brief Latent heat of melting for sea ice, for the combined system ICE + WATER (brine)
+ * @version 16.08: initial version
+ * @param T: Temperature (K)
+ * @param Sal: Salinity (PSU, which is g/kg)
+ * @return Latent heat of fusion for sea ice (J / kg)
+ */
+double SeaIce::compSeaIceLatentHeatFusion(const double& T, const double& Sal)
+{
+	// From: Bitz, C. M., and W. H. Lipscomb (1999), An energy-conserving thermodynamic model of sea ice, J. Geophys. Res., 104(C7), 15669–15677, doi:10.1029/1999JC900100.
+	// See Eq. 5
+	const double L0 = Constants::lh_fusion;
+	return L0 * (1.
+ (SeaIce::mu * Sal) / T); +} + + +/** + * @brief Latent heat of melting for sea ice, for the combined system ICE + WATER (brine) + * @version 16.08: initial version + * @param Edata + * @return Latent heat of fusion for sea ice (J / kg) + */ +double SeaIce::compSeaIceLatentHeatFusion(const ElementData& Edata) +{ + // From: Bitz, C. M., and W. H. Lipscomb (1999), An energy-conserving thermodynamic model of sea ice, J. Geophys. Res., 104(C7), 15669–15677, doi:10.1029/1999JC900100. + // See Eq. 5 + const double L0 = Constants::lh_fusion; + const double c0 = Constants::specific_heat_ice; + return c0 * (Edata.meltfreeze_tk - Edata.Te) + L0 * (1. + (SeaIce::mu * Edata.salinity) / Edata.Te); +} + + +/** + * @brief Calculate ice formation and decay at the bottom + * @version 16.08: initial version + * @param Xdata SnowStation object to use in calculation + * @param Mdata Meteo data + * @param sn_dt Time step (s) + * @param Sdata + */ +void SeaIce::bottomIceFormation(SnowStation& Xdata, const CurrentMeteo& Mdata, const double& sn_dt, SurfaceFluxes& Sdata) +{ + vector& NDS = Xdata.Ndata; + vector& EMS = Xdata.Edata; + size_t nE = Xdata.getNumberOfElements(); + if (Xdata.getNumberOfElements() == 0 && Xdata.Ndata[Xdata.SoilNode].T >= SeaIce::SeaWaterFreezingTemp) { + // Just open ocean + return; + } + + double netBottomEnergy = 0.; + double dM = 0.; + if (nE > 0 ) { + // With at least one element, calculate net energy flux + // Here: netBottomEnergy has units W/m^2 + netBottomEnergy = OceanHeatFlux + + compSeaIceThermalConductivity(EMS[Xdata.SoilNode]) * ( (NDS[Xdata.SoilNode+1].T - NDS[Xdata.SoilNode].T) / (NDS[Xdata.SoilNode+1].z - NDS[Xdata.SoilNode].z)); + dM = (-netBottomEnergy * sn_dt) / compSeaIceLatentHeatFusion(EMS[Xdata.SoilNode]); + } else { + // First time freezing, create first ice layer + // Here: netBottomEnergy has units W / kg + //netBottomEnergy = (Xdata.Ndata[Xdata.SoilNode].T - SeaIce::SeaWaterFreezingTemp) * 
compSeaIceHeatCapacity(Xdata.Ndata[Xdata.SoilNode].T, SeaIce::OceanSalinity) / sn_dt; + // Convert netBottomEnergy to W / m^2, assuming a 1 cm freezing layer + // TODO: insert density ocean water + // TODO: we don't know the ocean temperature profile, so we cannot accurately know how thick the first ice layer is + //const double freezing_ocean_depth = 0.1; + //netBottomEnergy *= Constants::density_water * freezing_ocean_depth; + //dM = (-netBottomEnergy * sn_dt) / compSeaIceLatentHeatFusion(Xdata.Ndata[Xdata.SoilNode].T, SeaIce::OceanSalinity); + dM = ThicknessFirstIceLayer * SeaIceDensity; + } + ApplyBottomIceMassBalance(Xdata, Mdata, dM, Sdata); +} + + +/** + * @brief Apply mass gain/loss at the bottom (dM) + * @version 16.08: initial version + * @param Xdata + * @param Mdata + * @param dM: mass change from phase changes at the bottom of the sea ice (kg/m^2), positive=gain, negative=loss. Note, dM only concerns change in ice mass. + * @param Sdata + */ +void SeaIce::ApplyBottomIceMassBalance(SnowStation& Xdata, const CurrentMeteo& Mdata, double dM, SurfaceFluxes& Sdata) +{ + // Dereference pointers + vector& NDS = Xdata.Ndata; + vector& EMS = Xdata.Edata; + size_t nE = Xdata.getNumberOfElements(); + + // Apply mass change: + double dz = 0.; + if ( dM > 0 ) { + // dM > 0: mass gain + if ( nE == 0 || EMS[Xdata.SoilNode].Rho < ice_threshold ) { + + const double dH = dM / SeaIceDensity; // Total height to be added. Note that dM represents the amount of ice resulting from the phase changes from the energy balance at the bottom of the ice. 
+ const size_t nAddE = 1; // Number of elements + const double dL = (dH / double(nAddE)); // Height of each individual layer + for ( size_t j = 0; j < nAddE; j++ ) { + dz += dL; + nE++; + Xdata.resize(nE); + if(nE > 1) { + // Shift all existing elements up in the domain + for(size_t ee = nE-1; ee > Xdata.SoilNode; ee--) { + EMS[ee]=EMS[ee-1]; + NDS[ee+1]=NDS[ee]; + NDS[ee]=NDS[ee-1]; + } + } else { + // Set upper node for very first element in the domain that will be newly created + NDS[nE].T = SeaIce::calculateMeltingTemperature(OceanSalinity); + } + // Set the new ice element + EMS[Xdata.SoilNode].depositionDate = Mdata.date; + EMS[Xdata.SoilNode].L0 = EMS[Xdata.SoilNode].L = dL; + EMS[Xdata.SoilNode].theta[SOIL] = 0.; + EMS[Xdata.SoilNode].theta[ICE] = (SeaIceDensity/Constants::density_ice); + EMS[Xdata.SoilNode].theta[WATER] = (1. - EMS[Xdata.SoilNode].theta[ICE]) * (Constants::density_ice/Constants::density_water); + EMS[Xdata.SoilNode].theta[WATER_PREF] = 0.; + EMS[Xdata.SoilNode].theta[AIR] = 1.0 - EMS[Xdata.SoilNode].theta[WATER] - EMS[Xdata.SoilNode].theta[WATER_PREF] - EMS[Xdata.SoilNode].theta[ICE] - EMS[Xdata.SoilNode].theta[SOIL]; + EMS[Xdata.SoilNode].salinity = OceanSalinity * EMS[Xdata.SoilNode].theta[WATER]; + EMS[Xdata.SoilNode].updDensity(); + Sdata.mass[SurfaceFluxes::MS_ICEBASE_MELTING_FREEZING]+=dM; + + + for (unsigned short ii = 0; ii < Xdata.number_of_solutes; ii++) { + EMS[Xdata.SoilNode].conc[ICE][ii] = Mdata.conc[ii]*Constants::density_ice/Constants::density_water; + EMS[Xdata.SoilNode].conc[WATER][ii] = Mdata.conc[ii]; + EMS[Xdata.SoilNode].conc[AIR][ii] = 0.; + EMS[Xdata.SoilNode].conc[SOIL][ii] = 0.; + } + + // Constitutive Parameters + EMS[Xdata.SoilNode].k[TEMPERATURE] = EMS[Xdata.SoilNode].k[SEEPAGE] = EMS[Xdata.SoilNode].k[SETTLEMENT]= 0.; + EMS[Xdata.SoilNode].heatCapacity(); + EMS[Xdata.SoilNode].c[SEEPAGE] = EMS[Xdata.SoilNode].c[SETTLEMENT]= 0.; + EMS[Xdata.SoilNode].soil[SOIL_RHO] = EMS[Xdata.SoilNode].soil[SOIL_K] = 
EMS[Xdata.SoilNode].soil[SOIL_C] = 0.; + EMS[Xdata.SoilNode].snowResidualWaterContent(); + + //new snow micro-structure + EMS[Xdata.SoilNode].sw_abs = 0.; + EMS[Xdata.SoilNode].rg = InitRg; + EMS[Xdata.SoilNode].dd = 0.; + EMS[Xdata.SoilNode].sp = 1.; + EMS[Xdata.SoilNode].rb = InitRb; + EMS[Xdata.SoilNode].N3 = Metamorphism::getCoordinationNumberN3(EMS[Xdata.SoilNode].Rho); + EMS[Xdata.SoilNode].opticalEquivalentGrainSize(); + EMS[Xdata.SoilNode].mk = 7; + EMS[Xdata.SoilNode].metamo = 0.; + EMS[Xdata.SoilNode].snowType(); // Snow classification + EMS[Xdata.SoilNode].dth_w = 0.; + EMS[Xdata.SoilNode].Qmf = 0.; + EMS[Xdata.SoilNode].QIntmf = 0.; + EMS[Xdata.SoilNode].dEps = 0.; + EMS[Xdata.SoilNode].Eps = EMS[Xdata.SoilNode].Eps_e = EMS[Xdata.SoilNode].Eps_v = EMS[Xdata.SoilNode].Eps_Dot = EMS[Xdata.SoilNode].Eps_vDot = EMS[Xdata.SoilNode].E = 0.; + EMS[Xdata.SoilNode].S = 0.; + EMS[Xdata.SoilNode].C = EMS[Xdata.SoilNode].CDot = 0.; + EMS[Xdata.SoilNode].ps2rb = 0.; + EMS[Xdata.SoilNode].s_strength = 0.; + EMS[Xdata.SoilNode].hard = 0.; + EMS[Xdata.SoilNode].S_dr = IOUtils::nodata; + EMS[Xdata.SoilNode].crit_cut_length = Constants::undefined; + EMS[Xdata.SoilNode].VG.theta_r = 0.; + EMS[Xdata.SoilNode].lwc_source = 0.; + EMS[Xdata.SoilNode].PrefFlowArea = 0.; + EMS[Xdata.SoilNode].dsm = 0.; + + EMS[Xdata.SoilNode].h = EMS[Xdata.SoilNode+1].h + .5 * dL; + + // Initial nodal properties + NDS[Xdata.SoilNode].u = 0.; // Initial displacement is 0 + NDS[Xdata.SoilNode].hoar = 0.; // The new snow surface hoar is set to zero + NDS[Xdata.SoilNode].udot = 0.; // Settlement rate is also 0 + NDS[Xdata.SoilNode].f = 0.; // Unbalanced forces are 0 + NDS[Xdata.SoilNode].S_n = IOUtils::nodata; + NDS[Xdata.SoilNode].S_s = IOUtils::nodata; + NDS[Xdata.SoilNode].z = 0.; + + BottomSalFlux += EMS[Xdata.SoilNode].salinity * dL; + } + } else { + // In this case, increase existing element + const double dL = dM / (EMS[Xdata.SoilNode].theta[ICE] * Constants::density_ice); + dz += dL; + 
const double L0 = EMS[Xdata.SoilNode].L; + EMS[Xdata.SoilNode].L0 = EMS[Xdata.SoilNode].L = (L0 + dL); + EMS[Xdata.SoilNode].updDensity(); + EMS[Xdata.SoilNode].h += .5 * dL; + BottomSalFlux += EMS[Xdata.SoilNode].salinity * dL; + Sdata.mass[SurfaceFluxes::MS_ICEBASE_MELTING_FREEZING]+=dM; + + } + } else { + // dM < 0: Mass loss + while (dM < 0. && nE > 0) { + if(EMS[Xdata.SoilNode].theta[ICE] * Constants::density_ice * EMS[Xdata.SoilNode].L + dM > Constants::eps2) { + // Reduce element length + const double dL = dM / (EMS[Xdata.SoilNode].theta[ICE] * Constants::density_ice); + EMS[Xdata.SoilNode].L0 = EMS[Xdata.SoilNode].L = EMS[Xdata.SoilNode].L + dL; + EMS[Xdata.SoilNode].updDensity(); + BottomSalFlux += EMS[Xdata.SoilNode].salinity * dL; + dz += dL; + Sdata.mass[SurfaceFluxes::MS_ICEBASE_MELTING_FREEZING]+=dM; + dM = 0.; + } else { + // Remove element + dM += EMS[Xdata.SoilNode].theta[ICE] * Constants::density_ice * EMS[Xdata.SoilNode].L; + Sdata.mass[SurfaceFluxes::MS_ICEBASE_MELTING_FREEZING]-=EMS[Xdata.SoilNode].Rho*EMS[Xdata.SoilNode].L; + dz += -EMS[Xdata.SoilNode].L; + // TODO: put mass in SNOWPACK runoff! 
+ // Add salinity to BottomSalFlux + BottomSalFlux += EMS[Xdata.SoilNode].salinity * -EMS[Xdata.SoilNode].L; + if(nE > Xdata.SoilNode) { + if(EMS[Xdata.SoilNode+1].VG.defined) { + if(EMS[Xdata.SoilNode+1].h > EMS[Xdata.SoilNode+1].VG.h_e) { + EMS[Xdata.SoilNode+1].h = EMS[Xdata.SoilNode].h; + } + } + // Shift all existing elements down in the domain + for(size_t ee = Xdata.SoilNode; ee < nE-1; ee++) { + EMS[ee]=EMS[ee+1]; + NDS[ee]=NDS[ee+1]; + NDS[ee+1]=NDS[ee+2]; + } + } + nE--; + Xdata.resize(nE); + } + } + } + + // Adjust domain + NDS[Xdata.SoilNode].z = 0.; + for (size_t e = Xdata.SoilNode; e < nE; e++) { + NDS[e + 1].z = NDS[e].z + EMS[e].L; + } + + // Adjust externally forced sea level: + if (ForcedSeaLevel != IOUtils::nodata) { + ForcedSeaLevel += dz; + } + + // Ocean water is infinite, so as much ice will be created as energy available, i.e., the bottom node is at meltfreeze_tk! + calculateMeltingTemperature(EMS[Xdata.SoilNode]); + if (nE > 0) NDS[Xdata.SoilNode].T = SeaIce::calculateMeltingTemperature(SeaIce::OceanSalinity); + EMS[Xdata.SoilNode].Te = 0.5 * (NDS[Xdata.SoilNode].T + NDS[Xdata.SoilNode+1].T); + EMS[Xdata.SoilNode].gradT = (NDS[Xdata.SoilNode+1].T - NDS[Xdata.SoilNode].T) / EMS[Xdata.SoilNode].L; + return; +} + + +/** + * @brief Returns the average bulk salinity (g / kg) + * @param Xdata Snow cover data + */ +double SeaIce::getAvgBulkSalinity(const SnowStation& Xdata) +{ + const size_t nE = Xdata.getNumberOfElements(); + double ret = 0.; + double dH = 0.; + for (size_t e = Xdata.SoilNode; e < nE; e++) { + ret += Xdata.Edata[e].salinity * Xdata.Edata[e].Rho * Xdata.Edata[e].L; + dH += Xdata.Edata[e].Rho * Xdata.Edata[e].L; + } + return (dH>0.) ? 
(ret/dH) : (IOUtils::nodata); +} + + +/** + * @brief Returns the average brine salinity (g / kg) + * @param Xdata Snow cover data + */ +double SeaIce::getAvgBrineSalinity(const SnowStation& Xdata) +{ + const size_t nE = Xdata.getNumberOfElements(); + double ret = 0.; + double dH = 0.; + for (size_t e = Xdata.SoilNode; e < nE; e++) { + ret += Xdata.Edata[e].theta[WATER] * Xdata.Edata[e].L * (((Xdata.Edata[e].theta[WATER] + Xdata.Edata[e].theta[WATER_PREF]) != 0.) ? (Xdata.Edata[e].salinity / (Xdata.Edata[e].theta[WATER] + Xdata.Edata[e].theta[WATER_PREF])) : (0.)); + dH += Xdata.Edata[e].theta[WATER] * Xdata.Edata[e].L; + } + return (dH>0.) ? (ret/dH) : (IOUtils::nodata); +} + + +/** + * @brief Returns the total salinity (g / m^2) + * @param Xdata Snow cover data + */ +double SeaIce::getTotSalinity(const SnowStation& Xdata) +{ + const size_t nE = Xdata.getNumberOfElements(); + double ret = 0.; + for (size_t e = Xdata.SoilNode; e < nE; e++) { + ret += (Xdata.Edata[e].theta[WATER] + Xdata.Edata[e].theta[WATER_PREF]) * Constants::density_water * Xdata.Edata[e].L * Xdata.Edata[e].salinity; + } + return ret; +} + +/** + * @brief Initializes a SnowStation object for appropriate sea ice conditions \n + * First, water and ice content is calculated, while maintaining initial bulk salinity and temperature + * After that, initialize pressure head consistently with the displaced ocean water + * @version 21.06: initial version + * @author Nander Wever + * @param Xdata The SnowStation object to initialize + */ +void SeaIce::InitSeaIce(SnowStation& Xdata) +{ + const size_t nE = Xdata.getNumberOfElements(); + if (nE==0 || !check_initial_conditions) return; // Nothing to do... 
+ + double totM = 0.; // Tracks total mass + + // Set thermodynamical properties consistently (temperature, salinity, etc): + for (size_t e = Xdata.SoilNode; e < nE; e++) { + // If a layer is reported as dry, no salinity can be present: + if (Xdata.Edata[e].theta[WATER]compSalinityProfile(Xdata); + Xdata.Seaice->OceanHeatFlux=(Bdata.qg == Constants::undefined)?(0.):(Bdata.qg); + Xdata.Seaice->bottomIceFormation(Xdata, Mdata, sn_dt, Sdata); + Xdata.Seaice->compSalinityProfile(Xdata); + Xdata.Seaice->updateFreeboard(Xdata); +} diff --git a/third_party/snowpack/snowpackCore/SeaIce.h b/third_party/snowpack/snowpackCore/SeaIce.h new file mode 100644 index 00000000..5366257c --- /dev/null +++ b/third_party/snowpack/snowpackCore/SeaIce.h @@ -0,0 +1,103 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND +*/ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . 
+*/ +/** + * @file SeaIce.h + * @version 10.02 + * This header file contains all the data structures needed for the 1d snowpack model + */ + +#ifndef SEAICE_H +#define SEAICE_H + +#include "../DataClasses.h" + +#include +#include +#include + +// Forward-declare classes +class ElementData; +class SnowStation; +class CurrentMeteo; +class BoundCond; +class SurfaceFluxes; + +class SeaIce { + public: + SeaIce(); + ~SeaIce(); + void ConfigSeaIce(const SnowpackConfig& i_cfg); + SeaIce& operator=(const SeaIce&); ///>(std::iostream& is, SeaIce& data); + + void calculateMeltingTemperature(ElementData& Edata); + void compSalinityProfile(SnowStation& Xdata); + void updateFreeboard(SnowStation& Xdata); + double findIceSurface(SnowStation& Xdata); + void compFlooding(SnowStation& Xdata, SurfaceFluxes& Sdata); + void bottomIceFormation(SnowStation& Xdata, const CurrentMeteo& Mdata, const double& sn_dt, SurfaceFluxes& Sdata); + void ApplyBottomIceMassBalance(SnowStation& Xdata, const CurrentMeteo& Mdata, double dM, SurfaceFluxes& Sdata); + + double getAvgBulkSalinity(const SnowStation& Xdata); + double getAvgBrineSalinity(const SnowStation& Xdata); + double getTotSalinity(const SnowStation& Xdata); + + void InitSeaIce(SnowStation& Xdata); + + void runSeaIceModule(SnowStation& Xdata, const CurrentMeteo& Mdata, BoundCond& Bdata, const double& sn_dt, SurfaceFluxes& Sdata); + +}; //end class Snowpack + +#endif diff --git a/third_party/snowpack/snowpackCore/Snowpack.cc b/third_party/snowpack/snowpackCore/Snowpack.cc index 25cb6936..432ae9c2 100644 --- a/third_party/snowpack/snowpackCore/Snowpack.cc +++ b/third_party/snowpack/snowpackCore/Snowpack.cc @@ -25,15 +25,16 @@ */ #include "Snowpack.h" -#include "../Constants.h" -#include "../Laws_sn.h" +#include "Solver.h" #include "../Meteo.h" -#include "../SnowDrift.h" +#include "../Constants.h" #include "../Utils.h" +#include "../Laws_sn.h" +#include "WaterTransport.h" +//#include "VapourTransport.h" +#include "../TechnicalSnow.h" #include 
"Metamorphism.h" #include "PhaseChange.h" -#include "Solver.h" -#include "WaterTransport.h" #include #include @@ -45,6 +46,8 @@ using namespace std; /************************************************************ * static section * ************************************************************/ +//Minimum allowed snowpack time step for solving the heat equation (automatic time stepping is applied when equation does not converge) +const double Snowpack::min_allowed_sn_dt = 0.01; //Uses an empirically determined size of deposited hydrometeors as new snow grain size (mm) const bool Snowpack::hydrometeor = false; @@ -53,16 +56,14 @@ const bool Snowpack::hydrometeor = false; const double Snowpack::snowfall_warning = 0.5; const unsigned int Snowpack::new_snow_marker = 0; -const double Snowpack::new_snow_albedo = 0.9; -const double Snowpack::min_snow_albedo = 0.3; /// Min volumetric ice content allowed const double Snowpack::min_ice_content = SnLaws::min_hn_density / Constants::density_ice; /// @brief Define the assembly macro -void Snowpack::EL_INCID(const size_t &e, int Ie[]) { - Ie[0] = static_cast( e ); - Ie[1] = static_cast( e+1 ); +void Snowpack::EL_INCID(const int &e, int Ie[]) { + Ie[0] = e; + Ie[1] = e+1; } /// @brief Define the node to element temperature macro @@ -84,35 +85,47 @@ void Snowpack::EL_RGT_ASSEM(double F[], const int Ie[], const double Fe[]) { ************************************************************/ Snowpack::Snowpack(const SnowpackConfig& i_cfg) - : cfg(i_cfg), surfaceCode(), + : surfaceCode(), cfg(i_cfg), techsnow(i_cfg), variant(), viscosity_model(), watertransportmodel_snow("BUCKET"), watertransportmodel_soil("BUCKET"), hn_density(), hn_density_parameterization(), sw_mode(), snow_albedo(), albedo_parameterization(), albedo_average_schmucki(), sw_absorption_scheme(), - atm_stability_model(), allow_adaptive_timestepping(false), albedo_fixedValue(Constants::glacier_albedo), hn_density_fixedValue(SnLaws::min_hn_density), + atm_stability_model(), 
albedo_NIED_av(0.75), albedo_fixedValue(Constants::glacier_albedo), hn_density_fixedValue(SnLaws::min_hn_density), meteo_step_length(0.), thresh_change_bc(-1.0), geo_heat(Constants::undefined), height_of_meteo_values(0.), height_new_elem(0.), sn_dt(0.), t_crazy_min(0.), t_crazy_max(0.), thresh_rh(0.), thresh_dtempAirSnow(0.), new_snow_dd(0.), new_snow_sp(0.), new_snow_dd_wind(0.), new_snow_sp_wind(0.), rh_lowlim(0.), bond_factor_rh(0.), new_snow_grain_size(0.), new_snow_bond_size(0.), hoar_density_buried(0.), hoar_density_surf(0.), hoar_min_size_buried(0.), - minimum_l_element(0.), t_surf(0.), - research_mode(false), useCanopyModel(false), enforce_measured_snow_heights(false), detect_grass(false), - soil_flux(false), useSoilLayers(false), combine_elements(false), reduce_n_elements(false), + minimum_l_element(0.), comb_thresh_l(IOUtils::nodata), t_surf(0.), + allow_adaptive_timestepping(false), research_mode(false), useCanopyModel(false), enforce_measured_snow_heights(false), detect_grass(false), + soil_flux(false), useSoilLayers(false), coupled_phase_changes(false), combine_elements(false), reduce_n_elements(false), change_bc(false), meas_tss(false), vw_dendricity(false), - enhanced_wind_slab(false), alpine3d(false), ageAlbedo(true), adjust_height_of_meteo_values(true), advective_heat(false), heat_begin(0.), heat_end(0.), - temp_index_degree_day(0.), temp_index_swr_factor(0.), forestfloor_alb(false) + enhanced_wind_slab(false), alpine3d(false), ageAlbedo(true), adjust_height_of_meteo_values(true), + adjust_height_of_wind_value(false), advective_heat(false), heat_begin(0.), heat_end(0.), + temp_index_degree_day(0.), temp_index_swr_factor(0.), forestfloor_alb(false), rime_index(false), newsnow_lwc(false), read_dsm(false), soil_evaporation(), soil_thermal_conductivity() { cfg.getValue("ALPINE3D", "SnowpackAdvanced", alpine3d); cfg.getValue("VARIANT", "SnowpackAdvanced", variant); + cfg.getValue("COUPLEDPHASECHANGES", "SnowpackAdvanced", coupled_phase_changes); + if 
(variant=="SEAICE") coupled_phase_changes = true; // to better deal with variable freezing point due to salinity //Define keys for new snow density computation cfg.getValue("HN_DENSITY", "SnowpackAdvanced", hn_density); - cfg.getValue("TEMP_INDEX_DEGREE_DAY", "SnowpackAdvanced", temp_index_degree_day); - cfg.getValue("TEMP_INDEX_SWR_FACTOR", "SnowpackAdvanced", temp_index_swr_factor); + cfg.getValue("TEMP_INDEX_DEGREE_DAY", "SnowpackAdvanced", temp_index_degree_day, IOUtils::nothrow); + cfg.getValue("TEMP_INDEX_SWR_FACTOR", "SnowpackAdvanced", temp_index_swr_factor, IOUtils::nothrow); cfg.getValue("HN_DENSITY_PARAMETERIZATION", "SnowpackAdvanced", hn_density_parameterization); cfg.getValue("HN_DENSITY_FIXEDVALUE", "SnowpackAdvanced", hn_density_fixedValue); + //Define keys for new snow information + cfg.getValue("RIME_INDEX", "SnowpackAdvanced", rime_index); + cfg.getValue("NEWSNOW_LWC", "SnowpackAdvanced", newsnow_lwc); + cfg.getValue("READ_DSM", "SnowpackAdvanced", read_dsm); + //Define keys for snow albedo computation cfg.getValue("SNOW_ALBEDO", "SnowpackAdvanced", snow_albedo); cfg.getValue("ALBEDO_PARAMETERIZATION", "SnowpackAdvanced", albedo_parameterization); cfg.getValue("ALBEDO_AVERAGE_SCHMUCKI", "SnowpackAdvanced", albedo_average_schmucki); + if (albedo_parameterization=="NIED") + cfg.getValue("ALBEDO_NIED_AV", "SnowpackAdvanced", albedo_NIED_av); + else + albedo_NIED_av=Constants::undefined; cfg.getValue("ALBEDO_FIXEDVALUE", "SnowpackAdvanced", albedo_fixedValue); cfg.getValue("ALBEDO_AGING", "SnowpackAdvanced", ageAlbedo); @@ -124,8 +137,9 @@ Snowpack::Snowpack(const SnowpackConfig& i_cfg) /* Defines the management of the bottom boundary conditions with soil layers * - 0 ==> Dirichlet, i.e fixed Temperature * - 1 ==> Neumann, fixed geothermal heat flux GEO_HEAT */ - cfg.getValue("SOIL_FLUX", "Snowpack", soil_flux); - if (useSoilLayers && soil_flux) { + cfg.getValue("SOIL_FLUX", "Snowpack", soil_flux, IOUtils::nothrow); + if ((useSoilLayers && soil_flux) 
|| variant == "SEAICE") { + // For sea ice, geo_heat is ocean heat flux cfg.getValue("GEO_HEAT", "Snowpack", geo_heat); //Constant geothermal heat flux at (great) depth (W m-2) } else { geo_heat = Constants::undefined; @@ -210,18 +224,21 @@ Snowpack::Snowpack(const SnowpackConfig& i_cfg) //Activates algorithm to reduce the number of elements deeper in the snowpack AND to split elements again when they come back to the surface //Only works when COMBINE_ELEMENTS == TRUE. cfg.getValue("REDUCE_N_ELEMENTS", "SnowpackAdvanced", reduce_n_elements); + cfg.getValue("COMB_THRESH_L", "SnowpackAdvanced", comb_thresh_l, IOUtils::nothrow); + if(comb_thresh_l == IOUtils::nodata) comb_thresh_l = SnowStation::comb_thresh_l_ratio * height_new_elem; // If no comb_thresh_l specified, use the default one (i.e., a fixed ratio from height_new_elem) //Warning is issued if snow tempeartures are out of bonds, that is, crazy cfg.getValue("T_CRAZY_MIN", "SnowpackAdvanced", t_crazy_min); cfg.getValue("T_CRAZY_MAX", "SnowpackAdvanced", t_crazy_max); cfg.getValue("FORESTFLOOR_ALB", "SnowpackAdvanced", forestfloor_alb); + /* Initial new snow parameters, see computeSnowFall() * - that rg and rb are equal to 0.5*gsz and 0.5*bsz, respectively. 
Both given in millimetres * - If VW_DENDRICITY is set, new snow dendricity is f(vw) * - BOND_FACTOR_RH new snow bonds get stronger for average winds >= SnLaws::event_wind_lowlim and * mean relative humidity >= rh_lowlim */ - if (variant == "ANTARCTICA") { + if (variant == "ANTARCTICA" || variant == "POLAR") { new_snow_dd = 0.5; new_snow_sp = 0.75; new_snow_dd_wind = 0.15; @@ -261,14 +278,29 @@ Snowpack::Snowpack(const SnowpackConfig& i_cfg) //Watertransport models cfg.getValue("WATERTRANSPORTMODEL_SNOW", "SnowpackAdvanced", watertransportmodel_snow); cfg.getValue("WATERTRANSPORTMODEL_SOIL", "SnowpackAdvanced", watertransportmodel_soil); + if (variant!="SEAICE" && coupled_phase_changes && useSoilLayers && watertransportmodel_soil=="RICHARDSEQUATION") + throw IOException("COUPLEDPHASECHANGES cannot be set to true when WATERTRANSPORTMODEL_SOIL == RICHARDSEQUATION! Please correct your ini file.", AT); //Indicate if the meteo values can be considered at constant height above the snow surface (e.g., Col de Porte measurement method) cfg.getValue("ADJUST_HEIGHT_OF_METEO_VALUES", "SnowpackAdvanced", adjust_height_of_meteo_values); + cfg.getValue("ADJUST_HEIGHT_OF_WIND_VALUE", "SnowpackAdvanced", adjust_height_of_wind_value); // Allow for the effect of a known advective heat flux - cfg.getValue("ADVECTIVE_HEAT", "SnowpackAdvanced", advective_heat); - cfg.getValue("HEAT_BEGIN", "SnowpackAdvanced", heat_begin); - cfg.getValue("HEAT_END", "SnowpackAdvanced", heat_end); + cfg.getValue("ADVECTIVE_HEAT", "SnowpackAdvanced", advective_heat, IOUtils::nothrow); + cfg.getValue("HEAT_BEGIN", "SnowpackAdvanced", heat_begin, IOUtils::nothrow); + cfg.getValue("HEAT_END", "SnowpackAdvanced", heat_end, IOUtils::nothrow); + + /* Get the soil evaporation model to be used + * - EVAP_RESISTANCE: Resistance Approach, see Laws_sn.c␊ + * - RELATIVE_HUMIDITY: Relative Humidity Approach, see Snowpack.cc + * - NONE: none, assume saturation pressure and no extra resistance */ + 
cfg.getValue("SOIL_EVAP_MODEL", "SnowpackAdvanced", soil_evaporation); + /* Get the soil thermal conductivity model to be used + * - FITTED: Use fit values for soil thermal conductivity, see snLaws::compSoilThermalConductivity() + * - RAW: Use simply Edata.soil[SOIL_K] + Edata.theta[WATER] * SnLaws::conductivity_water(Edata.Te) + + Edata.theta[ICE] * SnLaws::conductivity_ice(Edata.Te) */ + cfg.getValue("SOIL_THERMAL_CONDUCTIVITY", "SnowpackAdvanced", soil_thermal_conductivity); + } void Snowpack::setUseSoilLayers(const bool& value) { //NOTE is this really needed? @@ -363,9 +395,8 @@ void Snowpack::compSnowCreep(const CurrentMeteo& Mdata, SnowStation& Xdata) // Make sure settling is not larger than the space that is available (basically settling can at most reduce theta[AIR] to 0). // We also leave some room in case all liquid water freezes and thereby expands. - double MaxSettlingFactor=1.; // An additional maximum settling factor, between 0 and 1. 1: allow maximize possible settling, 0: no settling allowed. - if (watertransportmodel_snow=="RICHARDSEQUATION") MaxSettlingFactor=0.9; //For stability in the numerical solver. - dL = std::max(dL, std::min(0., -1.*MaxSettlingFactor*L0*(EMS[e].theta[AIR]-((Constants::density_water/Constants::density_ice)-1.)*EMS[e].theta[WATER]))); + const double MaxSettlingFactor = (watertransportmodel_snow=="RICHARDSEQUATION") ? (0.9) : (1. - Constants::eps); // An additional maximum settling factor, between 0 and 1. 1: allow maximize possible settling, 0: no settling allowed. + dL = std::max(dL, std::min(0., -1.*MaxSettlingFactor*L0*(EMS[e].theta[AIR]-((Constants::density_water/Constants::density_ice)-1.)*(EMS[e].theta[WATER]+EMS[e].theta[WATER_PREF])))); // Limit dL when the element length drops below minimum_l_element. This element will be merged in WaterTransport::mergingElements later on. 
if ((L0 + dL) < (1.-Constants::eps)*minimum_l_element) @@ -392,17 +423,19 @@ void Snowpack::compSnowCreep(const CurrentMeteo& Mdata, SnowStation& Xdata) } EMS[e].theta[WATER] *= L0 / (L0 + dL); + EMS[e].theta[WATER_PREF] *= L0 / (L0 + dL); EMS[e].theta[ICE] *= L0 / (L0 + dL); + EMS[e].theta_i_reservoir *= L0 / (L0 + dL); + EMS[e].theta_i_reservoir_cumul *= L0 / (L0 + dL); EMS[e].L0 = EMS[e].L = (L0 + dL); NDS[e+1].z = NDS[e].z + EMS[e].L; - EMS[e].theta[AIR] = 1.0 - EMS[e].theta[WATER] - EMS[e].theta[ICE] - EMS[e].theta[SOIL]; - EMS[e].Rho = (EMS[e].theta[ICE] * Constants::density_ice) + (EMS[e].theta[WATER] - *Constants::density_water) + (EMS[e].theta[SOIL] - * EMS[e].soil[SOIL_RHO]); - if (! (EMS[e].Rho > 0. && EMS[e].Rho <= Constants::max_rho)) { + EMS[e].theta[AIR] = 1.0 - EMS[e].theta[WATER] - EMS[e].theta[WATER_PREF] - EMS[e].theta[ICE] - EMS[e].theta[SOIL]; + EMS[e].theta[AIR] = std::max(0., EMS[e].theta[AIR]); + EMS[e].updDensity(); + if (EMS[e].Rho <= Constants::eps || (EMS[e].theta[WATER] + EMS[e].theta[WATER_PREF] + EMS[e].theta[ICE] + EMS[e].theta[SOIL] + EMS[e].theta[AIR] - 1) > 1.e-12 ) { prn_msg(__FILE__, __LINE__, "err", Date(), - "Volume contents: e=%d nE=%d rho=%lf ice=%lf wat=%lf air=%le", - e, nE, EMS[e].Rho, EMS[e].theta[ICE], EMS[e].theta[WATER], EMS[e].theta[AIR]); + "Volume contents: e=%d nE=%d rho=%lf ice=%lf wat=%lf wat_pref=%lf air=%le", + e, nE, EMS[e].Rho, EMS[e].theta[ICE], EMS[e].theta[WATER], EMS[e].theta[WATER_PREF], EMS[e].theta[AIR]); throw IOException("Runtime Error in compSnowCreep()", AT); } } @@ -440,13 +473,15 @@ bool Snowpack::sn_ElementKtMatrix(ElementData &Edata, double dt, const double dv // Find the conductivity of the element TODO: check thresholds double Keff; // the effective thermal conductivity if (Edata.theta[SOIL] > 0.0) { - Keff = SnLaws::compSoilThermalConductivity(Edata, dvdz); + Keff = SnLaws::compSoilThermalConductivity(Edata, dvdz, soil_thermal_conductivity); } else if (Edata.theta[ICE] > 0.55 || 
Edata.theta[ICE] < min_ice_content) { + // Note: no soil when inside this if-block. Keff = Edata.theta[AIR] * Constants::conductivity_air + Edata.theta[ICE] * Constants::conductivity_ice + - Edata.theta[WATER] * Constants::conductivity_water + Edata.theta[SOIL] * Edata.soil[SOIL_K]; + (Edata.theta[WATER]+Edata.theta[WATER_PREF]) * Constants::conductivity_water; } else { Keff = SnLaws::compSnowThermalConductivity(Edata, dvdz, !alpine3d); //do not show the warning for Alpine3D } + // mimics effect of vapour transport if liquid water present in snowpack Keff *= VaporEnhance; Edata.k[TEMPERATURE] = Keff; @@ -474,6 +509,17 @@ bool Snowpack::sn_ElementKtMatrix(ElementData &Edata, double dt, const double dv Se[0][1] += c; Se[1][0] += c; + // Add the source/sink term resulting from phase changes + Fe[1] += Edata.Qph_up * 0.5 * Edata.L; + Fe[0] += Edata.Qph_down * 0.5 * Edata.L; + + // Add the source/sink term resulting from phase changes (due to water vapor transport) + Fe[1] += Edata.Qmm * 1.0 * Edata.L; + Fe[0] += Edata.Qmm * 1.0 * Edata.L; + + //Se[1][1] += Edata.Qmm*Edata.L/Edata.Te; + //Fe[1] += Edata.Qmm*Edata.L; + return true; } @@ -504,11 +550,15 @@ bool Snowpack::sn_ElementKtMatrix(ElementData &Edata, double dt, const double dv */ void Snowpack::updateBoundHeatFluxes(BoundCond& Bdata, SnowStation& Xdata, const CurrentMeteo& Mdata) { - double actual_height_of_meteo_values; - if(!adjust_height_of_meteo_values) - actual_height_of_meteo_values=height_of_meteo_values + Xdata.cH - Xdata.Ground; - else - actual_height_of_meteo_values=height_of_meteo_values; + // Determine actual height of meteo values above Xdata.SoilNode: + double actual_height_of_meteo_values; // Height with reference Xdata.SoilNode + if(!adjust_height_of_meteo_values) { + // Case of fixed height above snow surface (e.g., weather model) + actual_height_of_meteo_values = height_of_meteo_values + Xdata.cH - Xdata.Ground + ( (Xdata.findMarkedReferenceLayer() == Constants::undefined) ? (0.) 
: (Xdata.findMarkedReferenceLayer()) - Xdata.Ground); + } else { + // Case of fixed height above ground surface (e.g., weather station) + actual_height_of_meteo_values = height_of_meteo_values; + } const double alpha = SnLaws::compSensibleHeatCoefficient(Mdata, Xdata, actual_height_of_meteo_values) * Constants::density_air * Constants::specific_heat_air; const double Tair = Mdata.ta; @@ -519,13 +569,15 @@ void Snowpack::updateBoundHeatFluxes(BoundCond& Bdata, SnowStation& Xdata, const Bdata.qs = alpha * (Tair - Tss); - Bdata.ql = SnLaws::compLatentHeat_Rh(Mdata, Xdata, actual_height_of_meteo_values); + Bdata.ql = SnLaws::compLatentHeat_Rh(soil_evaporation, Mdata, Xdata, actual_height_of_meteo_values); if (Xdata.getNumberOfElements() > 0) { // Limit fluxes in case of explicit treatment of boundary conditions const double theta_r = ((watertransportmodel_snow=="RICHARDSEQUATION" && Xdata.getNumberOfElements()>Xdata.SoilNode) || (watertransportmodel_soil=="RICHARDSEQUATION" && Xdata.getNumberOfElements()==Xdata.SoilNode)) ? (PhaseChange::RE_theta_threshold) : (PhaseChange::theta_r); + const double max_ice = ((watertransportmodel_snow=="RICHARDSEQUATION" && Xdata.getNumberOfElements()>Xdata.SoilNode) || (watertransportmodel_soil=="RICHARDSEQUATION" && Xdata.getNumberOfElements()==Xdata.SoilNode)) ? (ReSolver1d::max_theta_ice * (1. - Constants::eps)) : (1.); if (Xdata.Edata[Xdata.getNumberOfElements()-1].theta[WATER] > theta_r + Constants::eps // Water and ice ... - && Xdata.Edata[Xdata.getNumberOfElements()-1].theta[ICE] > Constants::eps) { // ... coexisting + && Xdata.Edata[Xdata.getNumberOfElements()-1].theta[ICE] > Constants::eps // ... 
coexisting + && Xdata.Edata[Xdata.getNumberOfElements()-1].theta[ICE] < max_ice) { Bdata.qs = std::min(350., std::max(-350., Bdata.qs)); Bdata.ql = std::min(250., std::max(-250., Bdata.ql)); } @@ -538,11 +590,27 @@ void Snowpack::updateBoundHeatFluxes(BoundCond& Bdata, SnowStation& Xdata, const Bdata.qr = 0.; } - const double lw_in = Constants::emissivity_snow * Constants::stefan_boltzmann * Mdata.ea * Optim::pow4(Tair); - Bdata.lw_out = Constants::emissivity_snow * Constants::stefan_boltzmann * Optim::pow4(Tss); - Bdata.lw_net = lw_in - Bdata.lw_out; + const double emmisivity = (Xdata.getNumberOfElements() > Xdata.SoilNode) ? Constants::emissivity_snow : Xdata.SoilEmissivity; - Bdata.qg = geo_heat; + const double lw_in = emmisivity * Atmosphere::blkBody_Radiation(Mdata.ea, Tair); + Bdata.lw_out = emmisivity * Constants::stefan_boltzmann * Optim::pow4(Tss); + if (Mdata.lw_net == IOUtils::nodata) { + // Default + Bdata.lw_net = lw_in - Bdata.lw_out; + } else { + // NET_LW provided + Bdata.lw_net = Mdata.lw_net; + } + + if (Mdata.geo_heat != IOUtils::nodata) { + // If geo_heat is provided in CurrentMeteo, use it. 
+ Bdata.qg = Mdata.geo_heat; + } else if (geo_heat != Constants::undefined) { + // Otherwise check if geo_heat is defined + Bdata.qg = geo_heat; + } else { + Bdata.qg = 0.; + } } /** @@ -567,11 +635,15 @@ void Snowpack::neumannBoundaryConditions(const CurrentMeteo& Mdata, BoundCond& B double Se[ N_OF_INCIDENCES ][ N_OF_INCIDENCES ], double Fe[ N_OF_INCIDENCES ]) { - double actual_height_of_meteo_values; - if(!adjust_height_of_meteo_values) - actual_height_of_meteo_values=height_of_meteo_values + Xdata.cH - Xdata.Ground; - else - actual_height_of_meteo_values=height_of_meteo_values; + // Determine actual height of meteo values above Xdata.SoilNode: + double actual_height_of_meteo_values; // Height with reference Xdata.SoilNode + if(!adjust_height_of_meteo_values) { + // Case of fixed height above snow surface (e.g., weather model) + actual_height_of_meteo_values = height_of_meteo_values + Xdata.cH - Xdata.Ground; + } else { + // Case of fixed height above ground surface (e.g., weather station) + actual_height_of_meteo_values = height_of_meteo_values; + } const double T_air = Mdata.ta; const size_t nE = Xdata.getNumberOfElements(); @@ -583,9 +655,12 @@ void Snowpack::neumannBoundaryConditions(const CurrentMeteo& Mdata, BoundCond& B // Now branch between phase change cases (semi-explicit treatment) and // dry snowpack dynamics/ice-free soil dynamics (implicit treatment) const double theta_r = ((watertransportmodel_snow=="RICHARDSEQUATION" && Xdata.getNumberOfElements()>Xdata.SoilNode) || (watertransportmodel_soil=="RICHARDSEQUATION" && Xdata.getNumberOfElements()==Xdata.SoilNode)) ? (PhaseChange::RE_theta_threshold) : (PhaseChange::theta_r); + const double max_ice = ((watertransportmodel_snow=="RICHARDSEQUATION" && Xdata.getNumberOfElements()>Xdata.SoilNode) || (watertransportmodel_soil=="RICHARDSEQUATION" && Xdata.getNumberOfElements()==Xdata.SoilNode)) ? (ReSolver1d::max_theta_ice * (1. 
- Constants::eps)) : (1.); if ((Xdata.Edata[nE-1].theta[WATER] > theta_r + Constants::eps // Water and ice ... - && Xdata.Edata[nE-1].theta[ICE] > Constants::eps) // ... coexisting + && Xdata.Edata[nE-1].theta[ICE] > Constants::eps // ... coexisting + && Xdata.Edata[nE-1].theta[ICE] < max_ice) + && variant != "SEAICE" && (T_iter != T_snow)) { // Explicit // Now allow a temperature index method if desired by the user @@ -610,9 +685,14 @@ void Snowpack::neumannBoundaryConditions(const CurrentMeteo& Mdata, BoundCond& B } // Net longwave radiation: NON-linear dependence on snow surface temperature - const double delta = SnLaws::compLWRadCoefficient( T_iter, T_air, Mdata.ea); - Se[1][1] += delta; - Fe[1] += delta * pow( Mdata.ea, 0.25 ) * T_air; + if (Mdata.lw_net == IOUtils::nodata) { + const double delta = SnLaws::compLWRadCoefficient( T_iter, T_air, Mdata.ea); + Se[1][1] += delta; + Fe[1] += delta * pow( Mdata.ea, 0.25 ) * T_air; + } else { + // When NET_LW is provided, we force explicitly + Fe[1] += Bdata.lw_net; + } // Because of the implicit time integration, must subtract this term from the flux .... 
Fe[1] -= Se[1][1] * T_snow; @@ -650,6 +730,7 @@ void Snowpack::neumannBoundaryConditionsSoil(const double& flux, const double& T double Snowpack::getParameterizedAlbedo(const SnowStation& Xdata, const CurrentMeteo& Mdata) const { + //please keep in mind that the radiation might have been tweaked in Meteo::compRadiation() const vector& NDS = Xdata.Ndata; const vector& EMS = Xdata.Edata; const size_t nN = Xdata.getNumberOfNodes(); @@ -660,27 +741,21 @@ double Snowpack::getParameterizedAlbedo(const SnowStation& Xdata, const CurrentM // Parameterized albedo (statistical model) including correct treatment of PLASTIC and WATER_LAYER if (nE > Xdata.SoilNode) { //there are some non-soil layers size_t eAlbedo = nE-1; - const size_t marker = EMS[eAlbedo].mk % 10; - - switch (marker) { - case 9: // WATER_LAYER - if (eAlbedo > Xdata.SoilNode) - eAlbedo--; - - case 8: // Ice layer within the snowpack - while ((eAlbedo > Xdata.SoilNode) && (marker == 8)) - eAlbedo--; - - default: // Snow, glacier ice, PLASTIC, or soil - if (eAlbedo > Xdata.SoilNode && (EMS[eAlbedo].theta[SOIL] < Constants::eps2)) { // Snow, or glacier ice - Albedo = SnLaws::parameterizedSnowAlbedo(snow_albedo, albedo_parameterization, albedo_average_schmucki, albedo_fixedValue, EMS[eAlbedo], NDS[eAlbedo+1].T, Mdata, ageAlbedo); - if (useCanopyModel && (Xdata.Cdata.height > 3.5)) { //forest floor albedo - const double age = (forestfloor_alb) ? std::max(0., Mdata.date.getJulian() - Xdata.Edata[eAlbedo].depositionDate.getJulian()) : 0.; // day - Albedo = (Albedo -.3)* exp(-age/7.) 
+ 0.3; - } - } else { // PLASTIC, or soil - Albedo = Xdata.SoilAlb; - } + size_t marker = EMS[eAlbedo].mk % 10; + + while ((marker==8 || marker==9) && eAlbedo > Xdata.SoilNode && !Xdata.isGlacier(false)){ //If Water or ice layer (but not glacier), go one layer down + eAlbedo--; + marker = EMS[eAlbedo].mk % 10; + } + + if (eAlbedo > Xdata.SoilNode && (EMS[eAlbedo].theta[SOIL] < Constants::eps2)) { // Snow, or glacier ice + Albedo = SnLaws::parameterizedSnowAlbedo(snow_albedo, albedo_parameterization, albedo_average_schmucki, albedo_NIED_av, albedo_fixedValue, EMS[eAlbedo], NDS[eAlbedo+1].T, Mdata, ageAlbedo); + if (useCanopyModel && (Xdata.Cdata.height > 3.5)) { //forest floor albedo + const double age = (forestfloor_alb) ? std::max(0., Mdata.date.getJulian() - Xdata.Edata[eAlbedo].depositionDate.getJulian()) : 0.; // day + Albedo = (Albedo -.3)* exp(-age/7.) + 0.3; + } + } else { // PLASTIC, or soil + Albedo = Xdata.SoilAlb; } } @@ -688,10 +763,9 @@ double Snowpack::getParameterizedAlbedo(const SnowStation& Xdata, const CurrentM if (useCanopyModel && (Xdata.Cdata.height > 3.5)) { //forest floor albedo Albedo = std::max(0.05, std::min(0.95, Albedo)); } else { - const bool use_hs_meas = enforce_measured_snow_heights && (Xdata.meta.getSlopeAngle() <= Constants::min_slope_angle); - const double hs = (use_hs_meas)? Xdata.mH - Xdata.Ground : Xdata.cH - Xdata.Ground; - if (research_mode) { // Treatment of "No Snow" on the ground in research mode + const bool use_hs_meas = enforce_measured_snow_heights && (Xdata.meta.getSlopeAngle() <= Constants::min_slope_angle); + const double hs = (use_hs_meas)? 
Xdata.mH - Xdata.Ground : Xdata.cH - Xdata.Ground; const bool snow_free_ground = (hs < 0.02) || (NDS[nN-1].T > IOUtils::C_TO_K(3.5)) || ((hs < 0.05) && (NDS[nN-1].T > IOUtils::C_TO_K(1.7))); if (snow_free_ground) Albedo = Xdata.SoilAlb; @@ -702,7 +776,7 @@ double Snowpack::getParameterizedAlbedo(const SnowStation& Xdata, const CurrentM if (nE > Xdata.SoilNode) { // For snow - Albedo = std::max(min_snow_albedo, std::min(new_snow_albedo, Albedo)); + Albedo = std::max(Constants::min_albedo, std::min(Constants::max_albedo, Albedo)); } else { // For soil Albedo = std::max(0.05, std::min(0.95, Albedo)); @@ -714,6 +788,7 @@ double Snowpack::getParameterizedAlbedo(const SnowStation& Xdata, const CurrentM double Snowpack::getModelAlbedo(const SnowStation& Xdata, CurrentMeteo& Mdata) const { + //please keep in mind that the radiation might have been tweaked in Meteo::compRadiation() const double pAlbedo = Xdata.pAlbedo; // Assign iswr and rswr correct values according to switch value @@ -724,8 +799,7 @@ double Snowpack::getModelAlbedo(const SnowStation& Xdata, CurrentMeteo& Mdata) c } else if (sw_mode == "BOTH") { // use measured albedo ... // ... 
while the ground is still snow covered according to HS measurements if (Mdata.mAlbedo != Constants::undefined) { - if ((!((Mdata.mAlbedo < 2.*Xdata.SoilAlb) - && ((Xdata.cH - Xdata.Ground) > 0.05))) && Mdata.mAlbedo <= 0.95) + if ( (!( (Mdata.mAlbedo < 2.*Xdata.SoilAlb) && ((Xdata.cH - Xdata.Ground) > 0.05)) ) && Mdata.mAlbedo <= 0.95) return Mdata.mAlbedo; //we have a measured albedo else Mdata.rswr = Mdata.iswr * pAlbedo; @@ -738,7 +812,7 @@ double Snowpack::getModelAlbedo(const SnowStation& Xdata, CurrentMeteo& Mdata) c exit(EXIT_FAILURE); } - return pAlbedo; //we do not have a measured labedo -> use parametrized + return pAlbedo; //we do not have a measured albedo -> use parametrized } /** @@ -813,16 +887,16 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd // Set bare ground surface temperature with no soil and return if (nN == 1) { - if ((Mdata.ts0 > Constants::melting_tk) && ((Mdata.ts0 - Mdata.ta) > 10.)) + if ((Mdata.ts0 > Constants::meltfreeze_tk) && ((Mdata.ts0 - Mdata.ta) > 10.)) NDS[0].T = (Mdata.ts0 + Mdata.ta) / 2.; else - NDS[0].T = Mdata.ts0; + NDS[0].T = (Mdata.ts0 == IOUtils::nodata) ? (Mdata.ta) : (Mdata.ts0); return true; } if (Kt != NULL) ds_Solve(ReleaseMatrixData, (SD_MATRIX_DATA*)Kt, 0); - ds_Initialize(nN, (SD_MATRIX_DATA**)&Kt); + ds_Initialize(static_cast(nN), (SD_MATRIX_DATA**)&Kt); /* * Define the structure of the matrix, i.e. its connectivity. For each element * we compute the element incidences and pass the incidences to the solver. @@ -830,8 +904,8 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd * equations specified by the incidence set are all connected to each other. * Initialize element data. 
*/ - for (size_t e = 0; e < nE; e++) { - int Nodes[2] = {(int)e, (int)e+1}; + for (int e = 0; e < static_cast(nE); e++) { + int Nodes[2] = {e, e+1}; ds_DefineConnectivity( (SD_MATRIX_DATA*)Kt, 2, Nodes , 1, 0 ); } @@ -872,14 +946,15 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd Xdata.Kt = Kt; // Set the temperature at the snowpack base to the prescribed value. + // This only in case the soil_flux is not used. if (!(useSoilLayers && soil_flux)) { if ((EMS[0].theta[ICE] >= min_ice_content)) { - // NOTE if there is water and ice in the base element, then the base temperature MUST be melting_tk + // NOTE if there is water and ice in the base element, then the base temperature MUST be meltfreeze_tk if ((EMS[0].theta[WATER] > SnowStation::thresh_moist_snow)) { - NDS[0].T = EMS[0].melting_tk; + NDS[0].T = EMS[0].meltfreeze_tk; } else if (!useSoilLayers) { // To avoid temperatures above freezing while snow covered - NDS[0].T = std::min(Mdata.ts0, EMS[0].melting_tk); + NDS[0].T = std::min(Mdata.ts0, EMS[0].meltfreeze_tk); } else { NDS[0].T = Mdata.ts0; } @@ -887,10 +962,20 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd NDS[0].T = Mdata.ts0; } } + // Now treat sea ice variant, in which ocean heat flux is used already at this point to build or destroy sea ice based on the net energy balance, so just set the temperature of the lowest node to melting. + if (variant == "SEAICE") { + NDS[0].T = Xdata.Seaice->calculateMeltingTemperature(SeaIce::OceanSalinity); + } // Copy Temperature at time0 into First Iteration for (size_t n = 0; n < nN; n++) { - U[n] = NDS[n].T; + if(n==nN-1 && coupled_phase_changes && surfaceCode != DIRICHLET_BC) { + //Correct the upper node, as it may have been forced to melting temperature for assessing the energy balance + U[n] = NDS[n].T = 2. 
* Xdata.Edata[n-1].Te - NDS[n-1].T; + } else { + U[n] = NDS[n].T; + } + dU[n] = 0.0; ddU[n] = 0.0; if (!(U[n] > t_crazy_min && U[n] < t_crazy_max)) { @@ -935,7 +1020,11 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd // validity range for the linearization. Therefore, we increase the MaxItnTemp for these cases: if (nN==3) MaxItnTemp = 200; if (nN==2) MaxItnTemp = 400; - if (nN==1) MaxItnTemp = 2000; + if (nN==1 || coupled_phase_changes) MaxItnTemp = 2000; + + for(size_t e = nE; e -->0; ) Xdata.Edata[e].Qph_up = Xdata.Edata[e].Qph_down = 0.; // Reset the energy flux to the adjacent nodes due to phase changes in the element + std::vector dth_i_up(nE, 0.); // Initialize theta[ICE] change due to phase changes at the upper adjacent node + std::vector dth_i_down(nE, 0.); // Initialize theta[ICE] change due to phase changes at the lower adjacent node // IMPLICIT INTEGRATION LOOP bool TempEqConverged = true; // Return value of this function compTemperatureProfile(...) @@ -945,12 +1034,81 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd ds_Solve(ResetMatrixData, (SD_MATRIX_DATA*)Kt, 0); for (size_t n = 0; n < nN; n++) { ddU[n] = dU[n]; - dU[n] = 0.0; + dU[n] = 0.; } // Assemble matrix + const double theta_rn = ((watertransportmodel_snow=="RICHARDSEQUATION" && Xdata.getNumberOfElements()>Xdata.SoilNode) || (watertransportmodel_soil=="RICHARDSEQUATION" && Xdata.getNumberOfElements()==Xdata.SoilNode)) ? (PhaseChange::RE_theta_r) : (PhaseChange::theta_r); + double maxd = 0.; // Tracks max. 
change in ice contents in domain (convergence criterion) for(size_t e = nE; e -->0; ) { - EL_INCID( e, Ie ); + if(coupled_phase_changes) { + // Initialize the change in ice contents due to phase changes based on the energy source/sink terms at the adjacent nodes + dth_i_up[e] = Xdata.Edata[e].Qph_up / ((Constants::density_ice * Constants::lh_fusion) / sn_dt); + dth_i_down[e] = Xdata.Edata[e].Qph_down / ((Constants::density_ice * Constants::lh_fusion) / sn_dt); + Xdata.Edata[e].Qph_up = Xdata.Edata[e].Qph_down = 0.; + + // Calculate the melting/freezing associated with the current temperature state + const double max_ice = ReSolver1d::max_theta_ice; + const double A = (Xdata.Edata[e].c[TEMPERATURE] * Xdata.Edata[e].Rho) / ( Constants::density_ice * Constants::lh_fusion ); + const double dth_i_up_in = dth_i_up[e]; + const double dth_i_down_in = dth_i_down[e]; + + if (Xdata.Seaice != NULL) { + // For sea ice, balance the meltfreeze_tk with assuming thermal equilibrium with the brine: + // (1): Xdata.Edata[e].meltfreeze_tk = Xdata.Edata[e].meltfreeze_tk = -SeaIce::mu * BrineSal_new + Constants::meltfreeze_tk; + // (2): BrineSal_new = (Xdata.Edata[e].salinity / (Xdata.Edata[e].theta[WATER] + deltaTheta)); + // (3): deltaTheta = A * (0.5 * (U[e+1] + U[e]) - Xdata.Edata[e].meltfreeze_tk) * (Constants::density_water / Constants::density_ice); + // Balancing equations (1), (2) and (3) derived using wxmaxima: + // T=-m*s/(th+(A*(u-T)))+c + // solve(%i1,T); + // With: + // T = Xdata.Edata[e].meltfreeze_tk + // m = SeaIce::mu + // s = Xdata.Edata[e].salinity + // th = tmp_Theta + // A = A * f + // u = tmp_T + // c = Constants::meltfreeze_tk + const double f = Constants::density_ice / Constants::density_water; + const double tmp_T = 0.5 * (U[e+1] + U[e]); + const double tmp_Theta = Xdata.Edata[e].theta[WATER] - 0.5 * (dth_i_up[e] + dth_i_down[e]) * f; + Xdata.Edata[e].meltfreeze_tk = -1. * (sqrt(A * f * A * f * tmp_T * tmp_T + (2. * A * f * tmp_Theta - 2. 
* A * f * A * f * Constants::meltfreeze_tk) * tmp_T + tmp_Theta * tmp_Theta - 2. * A * f * Constants::meltfreeze_tk * tmp_Theta + 4. * A * f * SeaIce::mu * Xdata.Edata[e].salinity + A * f * A * f * Constants::meltfreeze_tk * Constants::meltfreeze_tk) - A * f * tmp_T - tmp_Theta - A * f * Constants::meltfreeze_tk) / (2. * A * f); + } + + dth_i_up[e] += A * (Xdata.Edata[e].meltfreeze_tk - U[e+1]); // change in volumetric ice content in upper half of element + dth_i_down[e] += A * (Xdata.Edata[e].meltfreeze_tk - U[e]); // change in volumetric ice content in lower half of element + + // This approach is not stable, may introduce oscillations such that the temperature equation doesn't converge + const double dth_i_sum = 0.5 * (dth_i_up[e] + dth_i_down[e]); // Net phase change effect on ice content in element + if(dth_i_sum != 0.) { // Element has phase changes + double dth_i_lim = dth_i_sum; + if(dth_i_lim < 0.) { + // Melt: Only available ice can melt + dth_i_lim = std::max(-Xdata.Edata[e].theta[ICE], dth_i_lim); + } else { + // Freeze: Only available liquid water can freeze, and not more than max_ice + dth_i_lim = std::min(std::max(0., std::min(max_ice - Xdata.Edata[e].theta[ICE], (Xdata.Edata[e].theta[WATER] - theta_rn) * (Constants::density_water / Constants::density_ice))), dth_i_lim); + } + // Correct volumetric changes in upper and lower half of element proportional to limits + dth_i_down[e] = dth_i_up[e] = dth_i_lim; + } + + // Track max. abs. 
change in ice contents + maxd = std::max(maxd, fabs(dth_i_up[e] - dth_i_up_in)); + maxd = std::max(maxd, fabs(dth_i_down[e] - dth_i_down_in)); + + // Recalculate phase change energy + Xdata.Edata[e].Qph_up = (dth_i_up[e] * Constants::density_ice * Constants::lh_fusion) / sn_dt; + Xdata.Edata[e].Qph_down = (dth_i_down[e] * Constants::density_ice * Constants::lh_fusion) / sn_dt; + + if (Xdata.Seaice != NULL) { + // Adjust melting/freezing point assuming thermal equilibrium in the brine pockets + const double ThetaWater_new = (Xdata.Edata[e].theta[WATER] - 0.5 * (dth_i_up[e] + dth_i_down[e]) * (Constants::density_ice / Constants::density_water)); + const double BrineSal_new = (ThetaWater_new == 0.) ? (0.) : (Xdata.Edata[e].salinity / ThetaWater_new); + Xdata.Edata[e].meltfreeze_tk = Xdata.Seaice->calculateMeltingTemperature(BrineSal_new); + } + } + EL_INCID( static_cast(e), Ie ); EL_TEMP( Ie, T0, TN, NDS, U ); // Update the wind pumping velocity gradient const double dvdz = SnLaws::compWindGradientSnow(EMS[e], v_pump); @@ -973,9 +1131,10 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd * Several terms must be added to the global stiffness matrix Kt and flux * right-hand side vector dU. Note: Shortwave radiation --- since it is a body * or volumetric force --- is computed in sn_ElementKtMatrix(). - */ + */ + if (surfaceCode == NEUMANN_BC) { - EL_INCID(nE-1, Ie); + EL_INCID(static_cast(nE-1), Ie); EL_TEMP(Ie, T0, TN, NDS, U); neumannBoundaryConditions(Mdata, Bdata, Xdata, T0[1], TN[1], Se, Fe); ds_AssembleMatrix( (SD_MATRIX_DATA*)Kt, 2, Ie, 2, (double*) Se ); @@ -986,11 +1145,11 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd if (surfaceCode == DIRICHLET_BC) { // Dirichlet BC at surface: prescribed temperature value // NOTE Insert Big at this location to hold the temperature constant at the prescribed value. 
- Ie[0] = static_cast( nE ); - ds_AssembleMatrix((SD_MATRIX_DATA*)Kt, 1, Ie, 1, &Big); + Ie[0] = static_cast(nE); + ds_AssembleMatrix((SD_MATRIX_DATA*) Kt, 1, Ie, 1, &Big); } // Bottom node - if ((Xdata.SoilNode > 0) && soil_flux) { + if ((Xdata.SoilNode > 0) && soil_flux && variant != "SEAICE") { // Neumann BC at bottom: The lower boundary is now a heat flux -- put the heat flux in dU[0] EL_INCID(0, Ie); EL_TEMP(Ie, T0, TN, NDS, U); @@ -1003,15 +1162,21 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd // Dirichlet BC at bottom: prescribed temperature value // NOTE Insert Big at this location to hold the temperature constant at the prescribed value. Ie[0] = 0; - ds_AssembleMatrix((SD_MATRIX_DATA*)Kt, 1, Ie, 1, &Big); + ds_AssembleMatrix((SD_MATRIX_DATA*) Kt, 1, Ie, 1, &Big); } /* * Solve the linear system of equation. The te_F vector is used first as right- * hand-side vector for the linear system. The solver stores in this vector * the solution of the system of equations, the new temperature. - */ - ds_Solve( ComputeSolution, (SD_MATRIX_DATA*)Kt, dU ); + * It will throw an exception whenever the linear solver failed + */ + if (!ds_Solve(ComputeSolution, (SD_MATRIX_DATA*) Kt, dU)) { + prn_msg(__FILE__, __LINE__, "err", Mdata.date, + "Linear solver failed to solve for dU on the %d-th iteration.", + iteration); + throw IOException("Runtime error in compTemperatureProfile", AT); + } // Update the solution vectors and check for convergence for (size_t n = 0; n < nN; n++) ddU[n] = dU[n] - ddU[n]; @@ -1029,11 +1194,17 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd * must be constant. This means that the fluxes must be treated explicitely * (see neumannBoundaryConditions) */ - if (U[nE] + ddU[nE] > EMS[nE-1].melting_tk || EMS[nE-1].theta[WATER] > 0.) { - ControlTemp = 0.007; + if (U[nE] + ddU[nE] > EMS[nE-1].meltfreeze_tk || EMS[nE-1].theta[WATER] > 0.) { + ControlTemp = (variant == "SEAICE") ? 
(0.0001) : (0.007); MaxItnTemp = std::max(MaxItnTemp, (unsigned)200); // NOTE originally 100; } - NotConverged = (MaxTDiff > ControlTemp); + if(coupled_phase_changes) { + // With new phase change, we want at least one iteration extra, to account for possible phase changes, + // and we want an additional constraint of maximum change in phase change amount + NotConverged = (MaxTDiff > ControlTemp || iteration == 1 || maxd > ((variant == "SEAICE") ? (1.E-5) : (0.0001))); + } else { + NotConverged = (MaxTDiff > ControlTemp); + } if (iteration > MaxItnTemp) { if (ThrowAtNoConvergence) { prn_msg(__FILE__, __LINE__, "err", Mdata.date, @@ -1062,8 +1233,14 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd NotConverged = false; // Ensure we leave the do...while loop } } - for (size_t n = 0; n < nN; n++) + for (size_t n = 0; n < nN; n++) { U[n] += ddU[ n ]; + // If the solver converged, but we are seeing crazy nodes, and the function is not requested to throw at no convergence + // we set the flag TempEqConverged to false, such that the solver can try with a smaller time step + if ( ! 
((U[n] > t_crazy_min) && (U[n] < t_crazy_max)) ) { + if ( !NotConverged && !ThrowAtNoConvergence ) TempEqConverged = false; + } + } } while ( NotConverged ); // end Convergence Loop if (TempEqConverged) { @@ -1132,6 +1309,12 @@ bool Snowpack::compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xd } } free(U); free(dU); free(ddU); + if (coupled_phase_changes) { + // Ensure that when top element consists of ice, its upper node does not exceed melting temperature + // This is to have consistent surface energy balance calculation and for having good looking output + if (nE > 0 && Xdata.Edata[nE-1].theta[ICE] > Constants::eps) NDS[nE].T=std::min(Xdata.Edata[nE-1].meltfreeze_tk, NDS[nE].T); + } + return TempEqConverged; } @@ -1158,6 +1341,8 @@ void Snowpack::setHydrometeorMicrostructure(const CurrentMeteo& Mdata, const boo // Because density and volumetric contents are already defined, redo it here elem.Rho = 110.; elem.theta[ICE] = elem.Rho / Constants::density_ice; // ice content + elem.theta_i_reservoir = 0.0; + elem.theta_i_reservoir_cumul = 0.0; elem.theta[AIR] = 1. - elem.theta[ICE]; // void content } else { // no Graupel elem.mk = Snowpack::new_snow_marker; @@ -1175,7 +1360,7 @@ void Snowpack::setHydrometeorMicrostructure(const CurrentMeteo& Mdata, const boo elem.dd = new_snow_dd; elem.sp = new_snow_sp; // Adapt dd and sp for blowing snow - if ((Mdata.vw > 5.) && ((variant == "ANTARCTICA") + if ((Mdata.vw > 5.) 
&& ((variant == "ANTARCTICA" || variant == "POLAR") || (!SnLaws::jordy_new_snow && ((hn_density_parameterization == "BELLAIRE") || (hn_density_parameterization == "LEHNING_NEW"))))) { elem.dd = new_snow_dd_wind; @@ -1228,7 +1413,10 @@ void Snowpack::fillNewSnowElement(const CurrentMeteo& Mdata, const double& lengt // Volumetric components elem.theta[SOIL] = 0.0; elem.theta[ICE] = elem.Rho/Constants::density_ice; + elem.theta_i_reservoir = 0.0; + elem.theta_i_reservoir_cumul = 0.0; elem.theta[WATER] = 0.0; + elem.theta[WATER_PREF] = 0.0; elem.theta[AIR] = 1. - elem.theta[ICE]; for (unsigned short ii = 0; ii < number_of_solutes; ii++) { elem.conc[ICE][ii] = Mdata.conc[ii]*Constants::density_ice/Constants::density_water; @@ -1262,8 +1450,121 @@ void Snowpack::fillNewSnowElement(const CurrentMeteo& Mdata, const double& lengt elem.snowType(); // Snow classification //Initialise the Stability Index for ml_st_CheckStability routine - elem.S_dr = INIT_STABILITY; + elem.S_dr = IOUtils::nodata; elem.hard = IOUtils::nodata; + + elem.h = Constants::undefined; //Pressure head not initialized yet + + //Initial snow salinity + if (variant == "SEAICE" ) elem.salinity = SeaIce::InitSnowSalinity; + + double p_vapor = Atmosphere::vaporSaturationPressure(elem.Te); + elem.rhov = Atmosphere::waterVaporDensity(elem.Te, p_vapor); +} + +/** + * @brief Introduce new snow elements as technical snow + * @details When there is natural snow as well as man-made snow, + * the whole snow fall will have the properties of man-made snow. 
+ * @param Mdata Meteorological data + * @param Xdata Snow cover data + * @param cumu_precip cumulated amount of precipitation (kg m-2) + */ +void Snowpack::compTechnicalSnow(const CurrentMeteo& Mdata, SnowStation& Xdata, double& cumu_precip) +{ + const size_t nOldN = Xdata.getNumberOfNodes(); //Old number of nodes + const size_t nOldE = Xdata.getNumberOfElements(); //Old number of elements + const double cos_sl = Xdata.cos_sl; //slope cosinus + + double Tw, rho_hn, delta_cH, theta_w; + TechSnow::productionPpt(Mdata, cumu_precip, Tw, rho_hn, delta_cH, theta_w); + + // Now determine whether the increase in snow depth is large enough. + double hn = 0.; //new snow amount + if ( (delta_cH >= height_new_elem * cos_sl) ) { + cumu_precip = 0.0; // we use the mass through delta_cH + hn = delta_cH; + } + if (hn > Snowpack::snowfall_warning) + prn_msg(__FILE__, __LINE__, "wrn", Mdata.date, + "Large snowfall! hn=%.3f cm (azi=%.0f, slope=%.0f)", + M_TO_CM(hn), Xdata.meta.getAzimuth(), Xdata.meta.getSlopeAngle()); + + const size_t nAddE = (size_t)(hn / (height_new_elem*cos_sl)); + + if (nAddE < 1) return; + + Xdata.Albedo = Constants::max_albedo; + + const size_t nNewN = nOldN + nAddE; + const size_t nNewE = nOldE + nAddE; + Xdata.resize(nNewE); + vector& NDS = Xdata.Ndata; + vector& EMS = Xdata.Edata; + + // Fill the nodal data + if (!useSoilLayers && (nOldN-1 == Xdata.SoilNode)) // New snow on bare ground w/o soil + NDS[nOldN-1].T = Tw; // 0.5*(t_surf + Mdata.ta); + const double Ln = (hn / (double)nAddE); // New snow element length + double z0 = NDS[nOldN-1].z + NDS[nOldN-1].u + Ln; // Position of lowest new node + for (size_t n = nOldN; n < nNewN; n++) { //loop over the nodes + NDS[n].T = Tw; // t_surf Temperature of the new node + NDS[n].z = z0; // New nodal position + NDS[n].u = 0.0; // Initial displacement is 0 + NDS[n].hoar = 0.0; // The new snow surface hoar is set to zero + NDS[n].udot = 0.0; // Settlement rate is also 0 + NDS[n].f = 0.0; // Unbalanced forces are 0 + 
NDS[n].S_n = IOUtils::nodata; + NDS[n].S_s = IOUtils::nodata; + z0 += Ln; + } + + // Fill the element data + for (size_t e = nOldE; e < nNewE; e++) { //loop over the elements + const double length = (NDS[e+1].z + NDS[e+1].u) - (NDS[e].z + NDS[e].u); + fillNewSnowElement(Mdata, length, rho_hn, false, Xdata.number_of_solutes, EMS[e]); + + // Now give specific properties for technical snow, consider liquid water + // Assume that the user does not specify unreasonably high liquid water contents. + // This depends also on the density of the solid fraction - print a warning if it looks bad + EMS[e].theta[WATER] += theta_w; + + if ( (EMS[e].theta[WATER] + EMS[e].theta[ICE]) > 0.7) + prn_msg(__FILE__, __LINE__, "wrn", Mdata.date, + "Too much liquid water specified or density too high! Dry density =%.3f kg m-3 Water Content = %.3f %", rho_hn, theta_w); + + EMS[e].theta[AIR] = 1.0 - EMS[e].theta[WATER] - EMS[e].theta[WATER_PREF] - EMS[e].theta[ICE] - EMS[e].theta[SOIL]; + + if (EMS[e].theta[AIR] < 0.) { + prn_msg(__FILE__, __LINE__, "err", Mdata.date, "Error in technical snow input - no void fraction left"); + throw IOException("Runtime error in runSnowpackModel", AT); + } + + // To satisfy the energy balance, we should trigger an explicit treatment of the top boundary condition of the energy equation + // when new snow falls on top of wet snow or melting soil. This can be done by putting a tiny amount of liquid water in the new snow layers. + // Note that we use the same branching condition as in the function Snowpack::neumannBoundaryConditions(...) + const double theta_r = ((watertransportmodel_snow=="RICHARDSEQUATION" && Xdata.getNumberOfElements()>Xdata.SoilNode) || (watertransportmodel_soil=="RICHARDSEQUATION" && Xdata.getNumberOfElements()==Xdata.SoilNode)) ? 
(PhaseChange::RE_theta_threshold) : (PhaseChange::theta_r); + if(nOldE > 0 && EMS[nOldE-1].theta[WATER] > theta_r + Constants::eps && EMS[nOldE-1].theta[ICE] > Constants::eps) { + EMS[e].theta[WATER]+=(2.*Constants::eps); + EMS[e].theta[ICE]-=(2.*Constants::eps)*(Constants::density_water/Constants::density_ice); + EMS[e].theta[AIR]+=((Constants::density_water/Constants::density_ice)-1.)*(2.*Constants::eps); + } + EMS[e].meltfreeze_tk = Constants::meltfreeze_tk; + Xdata.ColdContent += EMS[e].coldContent(); //update cold content + + // Now adjust default new element values to technical snow (mk = 6) + EMS[e].mk = 6; + EMS[e].dd = 0.; + EMS[e].sp = 1.; + EMS[e].rg = 0.2; // Have to adapt after some tests + EMS[e].rb = EMS[e].rg/3.; + + } // End elements + + // Finally, update the computed snowpack height + Xdata.cH = NDS[nNewN-1].z + NDS[nNewN-1].u; + Xdata.ErosionLevel = nNewE-1; + } /** @@ -1282,6 +1583,11 @@ void Snowpack::fillNewSnowElement(const CurrentMeteo& Mdata, const double& lengt void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, double& cumu_precip, SurfaceFluxes& Sdata) { + if (Mdata.psum_tech!=Constants::undefined && Mdata.psum_tech > 0.) { + compTechnicalSnow(Mdata, Xdata, cumu_precip); + return; + } + bool add_element = false; double delta_cH = 0.; // Actual enforced snow depth double hn = 0.; //new snow amount @@ -1333,8 +1639,8 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl cumu_precip -= Mdata.psum; //if there is no precip, this does nothing return; } - } else { // HS driven - delta_cH = Xdata.mH - Xdata.cH; + } else { // HS driven, correct for a possible offset in measured snow height provided by a marked reference layer + delta_cH = Xdata.mH - Xdata.cH + ( (Xdata.findMarkedReferenceLayer() == Constants::undefined) ? (0.) 
: (Xdata.findMarkedReferenceLayer() - Xdata.Ground) ); } if (rho_hn == Constants::undefined) return; @@ -1342,8 +1648,8 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl // Let's check for the solid precipitation thresholds: // -> check relative humidity as well as difference between air and snow surface temperatures, // that is, no new snow during cloud free conditions! - const double melting_tk = (nOldE>0)? Xdata.Edata[nOldE-1].melting_tk : Constants::melting_tk; - const double dtempAirSnow = (change_bc && !meas_tss)? Mdata.ta - melting_tk : Mdata.ta - t_surf; //we use t_surf only if meas_tss & change_bc + const double meltfreeze_tk = (nOldE>0)? Xdata.Edata[nOldE-1].meltfreeze_tk : Constants::meltfreeze_tk; + const double dtempAirSnow = (change_bc && !meas_tss)? Mdata.ta - meltfreeze_tk : Mdata.ta - t_surf; //we use t_surf only if meas_tss & change_bc const bool snow_fall = (((Mdata.rh > thresh_rh) && (Mdata.psum_ph<1.) && (dtempAirSnow < thresh_dtempAirSnow)) || !enforce_measured_snow_heights || (Xdata.hn > 0.)); @@ -1355,17 +1661,21 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl } else { snowed_in = ((Xdata.getNumberOfNodes() > Xdata.SoilNode+1) || (detect_grass && - (((Mdata.tss_a24h < IOUtils::C_TO_K(TSS_threshold24)) - && (Mdata.hs_rate > HS_threshold_smallincrease)) - || ((Mdata.tss_a12h < IOUtils::C_TO_K(TSS_threshold12_smallHSincrease)) - && (Mdata.hs_rate > HS_threshold_smallincrease)) - || ((Mdata.tss_a12h < IOUtils::C_TO_K(TSS_threshold12_largeHSincrease)) - && (Mdata.hs_rate > HS_threshold_largeincrease)) - ) + (((Mdata.tss_a24h < IOUtils::C_TO_K(TSS_threshold24)) + && (Mdata.hs_rate > HS_threshold_smallincrease)) + || ((Mdata.tss_a12h < IOUtils::C_TO_K(TSS_threshold12_smallHSincrease)) + && (Mdata.hs_rate > HS_threshold_smallincrease)) + || ((Mdata.tss_a12h < IOUtils::C_TO_K(TSS_threshold12_largeHSincrease)) + && (Mdata.hs_rate > HS_threshold_largeincrease)) + ) ) || 
(Mdata.hs_rate > HS_threshold_verylargeincrease) ); } + if (variant == "SEAICE" && nOldE == 0) { + // Ignore snow fall on open ocean + snowed_in = false; + } // Go ahead if there is a snow fall AND the ground is or can be snowed in. if (snow_fall && snowed_in) { @@ -1374,11 +1684,11 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl // We also adjust Xdata.mH to have it reflect deposited snow but not the canopy. // This can only be done when SNOWPACK is snow height driven and there is a canopy. if ((enforce_measured_snow_heights) - && (Xdata.Cdata.height > 0.) - && ((Xdata.Cdata.height < ThresholdSmallCanopy) || (useCanopyModel == false)) - && (Mdata.hs != mio::IOUtils::nodata) - && (Xdata.mH != Constants::undefined) - && (Xdata.meta.getSlopeAngle() < Constants::min_slope_angle)) { + && (Xdata.Cdata.height > 0.) + && ((Xdata.Cdata.height < ThresholdSmallCanopy) || (useCanopyModel == false)) + && (Mdata.hs != mio::IOUtils::nodata) + && (Xdata.mH != Constants::undefined) + && (Xdata.meta.getSlopeAngle() < Constants::min_slope_angle)) { /* The third clause above limits the issue to small canopies only, to prevent problems * with Alpine3D simulations in forests. This prerequisite is only checked for when useCanopyModel * is true. If useCanopyModel is false, we can safely assume all snow to fall on top of canopy. @@ -1412,7 +1722,7 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl Xdata.mH -= Xdata.Cdata.height; // Adjust Xdata.mH to represent the "true" enforced snow depth if (Xdata.mH < Xdata.Ground) // and make sure it doesn't get negative Xdata.mH = Xdata.Ground; - delta_cH = Xdata.mH - Xdata.cH; + delta_cH = Xdata.mH - Xdata.cH + ( (Xdata.findMarkedReferenceLayer() == Constants::undefined) ? (0.) : (Xdata.findMarkedReferenceLayer() - Xdata.Ground) ); } // Now determine whether the increase in snow depth is large enough. 
@@ -1472,7 +1782,7 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl Xdata.Ndata[nOldN-1].hoar = 0.; } - Xdata.Albedo = Snowpack::new_snow_albedo; + Xdata.Albedo = Constants::max_albedo; const size_t nNewN = nOldN + nAddE + nHoarE; const size_t nNewE = nOldE + nAddE + nHoarE; @@ -1494,16 +1804,17 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl EMS[nOldE-1].theta[ICE] *= L0/EMS[nOldE-1].L; EMS[nOldE-1].theta[ICE] += -hoar/(Constants::density_ice*EMS[nOldE-1].L); EMS[nOldE-1].theta[ICE] = std::max(EMS[nOldE-1].theta[ICE],0.); + EMS[nOldE-1].theta_i_reservoir = 0.0; + EMS[nOldE-1].theta_i_reservoir_cumul = 0.0; EMS[nOldE-1].theta[WATER] *= L0/EMS[nOldE-1].L; + EMS[nOldE-1].theta[WATER_PREF] *= L0/EMS[nOldE-1].L; for (unsigned int ii = 0; ii < Xdata.number_of_solutes; ii++) EMS[nOldE-1].conc[ICE][ii] *= L0*Theta0/(EMS[nOldE-1].theta[ICE]*EMS[nOldE-1].L); EMS[nOldE-1].M -= hoar; assert(EMS[nOldE-1].M>=0.); //the mass must be positive - EMS[nOldE-1].theta[AIR] = std::max(0., 1.0 - EMS[nOldE-1].theta[WATER] + EMS[nOldE-1].theta[AIR] = std::max(0., 1.0 - EMS[nOldE-1].theta[WATER] - EMS[nOldE-1].theta[WATER_PREF] - EMS[nOldE-1].theta[ICE] - EMS[nOldE-1].theta[SOIL]); - EMS[nOldE-1].Rho = (EMS[nOldE-1].theta[ICE] * Constants::density_ice) - + (EMS[nOldE-1].theta[WATER] * Constants::density_water) - + (EMS[nOldE-1].theta[SOIL] * EMS[nOldE-1].soil[SOIL_RHO]); + EMS[nOldE-1].updDensity(); assert(EMS[nOldE-1].Rho>=0. 
|| EMS[nOldE-1].Rho==IOUtils::nodata); //we want positive density // Take care of old surface node NDS[nOldN-1].z += dL + NDS[nOldN-1].u; @@ -1511,14 +1822,16 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl NDS[nOldN-1].hoar = 0.0; // Now fill nodal data for upper hoar node NDS[nOldN].T = t_surf; // The temperature of the new node + double p_vapor = Atmosphere::vaporSaturationPressure(NDS[nOldN].T); + NDS[nOldN].rhov = Atmosphere::waterVaporDensity(NDS[nOldN].T, p_vapor); // The new nodal position; NDS[nOldN].z = NDS[nOldN-1].z + NDS[nOldN-1].u + hoar/hoar_density_buried; NDS[nOldN].u = 0.0; // Initial displacement is 0 NDS[nOldN].hoar = hoar / hoar_density_buried; // Surface hoar initial size NDS[nOldN].udot = 0.0; // Settlement rate is also 0 NDS[nOldN].f = 0.0; // Unbalanced forces is 0 - NDS[nOldN].S_n = INIT_STABILITY; - NDS[nOldN].S_s = INIT_STABILITY; + NDS[nOldN].S_n = IOUtils::nodata; + NDS[nOldN].S_s = IOUtils::nodata; } else { // Make sure top node surface hoar mass is removed NDS[nOldN-1].hoar = 0.0; } @@ -1530,13 +1843,15 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl double z0 = NDS[nOldN-1+nHoarE].z + NDS[nOldN-1+nHoarE].u + Ln; // Position of lowest new node for (size_t n = nOldN+nHoarE; n < nNewN; n++) { //loop over the nodes NDS[n].T = t_surf; // Temperature of the new node + double p_vapor = Atmosphere::vaporSaturationPressure(NDS[n].T); + NDS[n].rhov = Atmosphere::waterVaporDensity(NDS[n].T, p_vapor); NDS[n].z = z0; // New nodal position NDS[n].u = 0.0; // Initial displacement is 0 NDS[n].hoar = 0.0; // The new snow surface hoar is set to zero NDS[n].udot = 0.0; // Settlement rate is also 0 NDS[n].f = 0.0; // Unbalanced forces are 0 - NDS[n].S_n = INIT_STABILITY; - NDS[n].S_s = INIT_STABILITY; + NDS[n].S_n = IOUtils::nodata; + NDS[n].S_s = IOUtils::nodata; z0 += Ln; } @@ -1555,6 +1870,13 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl 
EMS[e].theta[ICE]-=(2.*Constants::eps)*(Constants::density_water/Constants::density_ice); EMS[e].theta[AIR]+=((Constants::density_water/Constants::density_ice)-1.)*(2.*Constants::eps); } + if (variant == "SEAICE" ) { + const double BrineSal_new = (EMS[e].theta[WATER] == 0.) ? (0.) : (EMS[e].salinity / EMS[e].theta[WATER]); + EMS[e].meltfreeze_tk = Xdata.Seaice->calculateMeltingTemperature(BrineSal_new); + } else { + EMS[e].meltfreeze_tk = Constants::meltfreeze_tk; + } + Xdata.ColdContent += EMS[e].coldContent(); //update cold content } // End elements @@ -1637,14 +1959,21 @@ void Snowpack::compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, doubl * @param Bdata * @param Sdata */ -void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& cumu_precip, +void Snowpack::runSnowpackModel(CurrentMeteo& Mdata, SnowStation& Xdata, double& cumu_precip, BoundCond& Bdata, SurfaceFluxes& Sdata, double mass_erode) { // HACK -> couldn't the following objects be created once in init ?? (with only a reset method ??) WaterTransport watertransport(cfg); + //VapourTransport vapourtransport(cfg); Metamorphism metamorphism(cfg); SnowDrift snowdrift(cfg); PhaseChange phasechange(cfg); + if (Xdata.Seaice != NULL) Xdata.Seaice->ConfigSeaIce(cfg); + + // ADJUST_HEIGHT_OF_METEO_VALUE is checked at each call to allow different + // cfg values for different pixels in Alpine3D + cfg.getValue("ADJUST_HEIGHT_OF_METEO_VALUES", "SnowpackAdvanced", adjust_height_of_meteo_values); + try { //since precipitation phase is a little less intuitive than other, measured parameters, make sure it is provided @@ -1653,8 +1982,8 @@ void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& // Set and adjust boundary conditions surfaceCode = NEUMANN_BC; - double melting_tk = (Xdata.getNumberOfElements()>0)? 
Xdata.Edata[Xdata.getNumberOfElements()-1].melting_tk : Constants::melting_tk; - t_surf = std::min(melting_tk, Xdata.Ndata[Xdata.getNumberOfNodes()-1].T); + double meltfreeze_tk = (Xdata.getNumberOfElements()>0)? Xdata.Edata[Xdata.getNumberOfElements()-1].meltfreeze_tk : Constants::meltfreeze_tk; + t_surf = std::min(meltfreeze_tk, Xdata.Ndata[Xdata.getNumberOfNodes()-1].T); if (change_bc && meas_tss) { if ((Mdata.tss < IOUtils::C_TO_K(thresh_change_bc)) && Mdata.tss != IOUtils::nodata){ surfaceCode = DIRICHLET_BC; @@ -1673,33 +2002,46 @@ void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& double tmp=0.; snowdrift.compSnowDrift(Mdata, Xdata, Sdata, tmp); } else - { + { double tmp = mass_erode; snowdrift.compSnowDrift(Mdata, Xdata, Sdata, tmp); } - // snowdrift.compSnowDrift(Mdata, Xdata, Sdata, cumu_precip); + if (Xdata.Seaice != NULL) { + // Reinitialize and compute the initial meteo heat fluxes + Bdata.reset(); + updateBoundHeatFluxes(Bdata, Xdata, Mdata); + // Run sea ice module + Xdata.Seaice->runSeaIceModule(Xdata, Mdata, Bdata, sn_dt, Sdata); + // Remesh when necessary + Xdata.splitElements(2. * comb_thresh_l, comb_thresh_l); + } + const double sn_dt_bcu = sn_dt; // Store original SNOWPACK time step const double psum_bcu = Mdata.psum; // Store original psum value const double psum_ph_bcu = Mdata.psum_ph; // Store original psum_ph value int ii = 0; // Counter for sub-timesteps to match one SNOWPACK time step bool LastTimeStep = false; // Flag to indicate if it is the last sub-time step double p_dt = 0.; // Cumulative progress of time steps - if ((Mdata.psi_s >= 0. || t_surf > Mdata.ta) && atm_stability_model != Meteo::NEUTRAL && allow_adaptive_timestepping == true) { + if ((Mdata.psi_s >= 0. || t_surf > Mdata.ta) && atm_stability_model != Meteo::NEUTRAL && allow_adaptive_timestepping == true && sn_dt > 60.) { // To reduce oscillations in TSS, reduce the time step prematurely when atmospheric stability is unstable. 
if (Mdata.psum != mio::IOUtils::nodata) Mdata.psum /= sn_dt; // psum is precipitation per time step, so first express it as rate with the old time step (necessary for rain only)... sn_dt = 60.; if (Mdata.psum != mio::IOUtils::nodata) Mdata.psum *= sn_dt; // ... then express psum again as precipitation per time step with the new time step } + + Meteo meteo(cfg); do { - if (ii >= 1) { - // After the first sub-time step, update Meteo object to reflect on the new stability state - Meteo M(cfg); - M.compMeteo(Mdata, Xdata, false); + // After the first sub-time step, update Meteo object to reflect on the new stability state + if (ii >= 1){ + // ADJUST_HEIGHT_OF_WIND_VALUE is checked at each call to allow different + // cfg values for different pixels in Alpine3D + cfg.getValue("ADJUST_HEIGHT_OF_WIND_VALUE", "SnowpackAdvanced", adjust_height_of_wind_value); + meteo.compMeteo(Mdata, Xdata, false, adjust_height_of_wind_value); } // Reinitialize and compute the initial meteo heat fluxes - memset((&Bdata), 0, sizeof(BoundCond)); + Bdata.reset(); updateBoundHeatFluxes(Bdata, Xdata, Mdata); // set the snow albedo @@ -1707,7 +2049,8 @@ void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& Xdata.Albedo = getModelAlbedo(Xdata, Mdata); //either parametrized or measured // Compute the temperature profile in the snowpack and soil, if present - if (compTemperatureProfile(Mdata, Xdata, Bdata, (allow_adaptive_timestepping == true)?(false):(true))) { + for (size_t e = 0; e < Xdata.getNumberOfElements(); e++) Xdata.Edata[e].Qph_up = Xdata.Edata[e].Qph_down = 0.; + if (compTemperatureProfile(Mdata, Xdata, Bdata, (sn_dt < min_allowed_sn_dt))) { // Entered after convergence ii++; // Update time step counter p_dt += sn_dt; // Update progress variable @@ -1722,11 +2065,12 @@ void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& if ((change_bc && meas_tss) && (surfaceCode == NEUMANN_BC) && (Xdata.Ndata[Xdata.getNumberOfNodes()-1].T < 
mio::IOUtils::C_TO_K(thresh_change_bc))) { surfaceCode = DIRICHLET_BC; - melting_tk = (Xdata.getNumberOfElements()>0)? Xdata.Edata[Xdata.getNumberOfElements()-1].melting_tk : Constants::melting_tk; - Xdata.Ndata[Xdata.getNumberOfNodes()-1].T = std::min(Mdata.tss, melting_tk); /*C_TO_K(thresh_change_bc/2.);*/ + meltfreeze_tk = (Xdata.getNumberOfElements()>0)? Xdata.Edata[Xdata.getNumberOfElements()-1].meltfreeze_tk : Constants::meltfreeze_tk; + Xdata.Ndata[Xdata.getNumberOfNodes()-1].T = std::min(Mdata.tss, meltfreeze_tk); /*C_TO_K(thresh_change_bc/2.);*/ // update the snow albedo Xdata.pAlbedo = getParameterizedAlbedo(Xdata, Mdata); Xdata.Albedo = getModelAlbedo(Xdata, Mdata); //either parametrized or measured + for (size_t e = 0; e < Xdata.getNumberOfElements(); e++) Xdata.Edata[e].Qph_up = Xdata.Edata[e].Qph_down = 0.; compTemperatureProfile(Mdata, Xdata, Bdata, true); // Now, throw on non-convergence } if (LastTimeStep) Sdata.compSnowSoilHeatFlux(Xdata); @@ -1735,10 +2079,38 @@ void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& if (ii == 1) phasechange.initialize(Xdata); // See if any SUBSURFACE phase changes are occuring due to updated temperature profile - if (!alpine3d) - phasechange.compPhaseChange(Xdata, Mdata.date); - else - phasechange.compPhaseChange(Xdata, Mdata.date, false); + if(!coupled_phase_changes && Xdata.getNumberOfElements() != 0) { + if (!alpine3d) + phasechange.compPhaseChange(Xdata, Mdata.date); + else + phasechange.compPhaseChange(Xdata, Mdata.date, false); + } else { + const double theta_r = ((watertransportmodel_snow=="RICHARDSEQUATION" && Xdata.getNumberOfElements()>Xdata.SoilNode) || (watertransportmodel_soil=="RICHARDSEQUATION" && Xdata.getNumberOfElements()==Xdata.SoilNode)) ? 
(PhaseChange::RE_theta_r) : (PhaseChange::theta_r); + const double max_ice = ReSolver1d::max_theta_ice; + for (size_t e = 0; e < Xdata.getNumberOfElements(); e++) { + // Net ice contents change: + double dth_i = 0.5 * (Xdata.Edata[e].Qph_up + Xdata.Edata[e].Qph_down) / ((Constants::density_ice * Constants::lh_fusion) / sn_dt); + // Limit to all ice melts: + dth_i = (dth_i<0.)?(std::max(-Xdata.Edata[e].theta[ICE], dth_i)):(dth_i); + // Limit to all liquid water freezes: + dth_i = (dth_i>0.)?(std::min(std::max(0., std::min(max_ice - Xdata.Edata[e].theta[ICE], (Xdata.Edata[e].theta[WATER] - theta_r) * (Constants::density_water / Constants::density_ice))), dth_i)):(dth_i); + // Apply phase change: + Xdata.Edata[e].dth_w -= dth_i * Constants::density_ice / Constants::density_water; + Xdata.Edata[e].Qmf += (dth_i * Constants::density_ice * Constants::lh_fusion) / sn_dt_bcu; // (W m-3) + Xdata.Edata[e].theta[ICE] += dth_i; + Xdata.Edata[e].theta[WATER] -= dth_i*Constants::density_ice/Constants::density_water; + Xdata.Edata[e].theta[AIR] = 1. - Xdata.Edata[e].theta[WATER] - Xdata.Edata[e].theta[WATER_PREF] - Xdata.Edata[e].theta[ICE] - Xdata.Edata[e].theta[SOIL]; + Xdata.Edata[e].updDensity(); + Xdata.Edata[e].heatCapacity(); + Xdata.Edata[e].Qph_up = Xdata.Edata[e].Qph_down = 0.; + + if (Xdata.Seaice != NULL) { + // Adjust melting/freezing point assuming thermal quilibrium in the brine pockets + const double BrineSal_new = (Xdata.Edata[e].theta[WATER] == 0.) ? (0.) 
: (Xdata.Edata[e].salinity / Xdata.Edata[e].theta[WATER]); + Xdata.Edata[e].meltfreeze_tk = Xdata.Seaice->calculateMeltingTemperature(BrineSal_new); + } + } + } // Compute the final heat fluxes at the last sub-time step if (LastTimeStep) Sdata.ql += Bdata.ql; // Bad;-) HACK, needed because latent heat ql is not (yet) @@ -1758,17 +2130,6 @@ void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& sn_dt /= 2.; // No convergence, half the time step if (Mdata.psum != mio::IOUtils::nodata) Mdata.psum *= sn_dt; // ... then express psum again as precipitation per time step with the new time step - if (sn_dt < 0.01) { // If time step gets too small, we are lost - prn_msg(__FILE__, __LINE__, "err", Mdata.date, "Temperature equation did not converge, even after reducing time step (azi=%.0lf, slope=%.0lf).", Xdata.meta.getAzimuth(), Xdata.meta.getSlopeAngle()); - for (size_t n = 0; n < Xdata.getNumberOfNodes(); n++) { - prn_msg(__FILE__, __LINE__, "msg-", Date(), - "N[%03d]: %8.4lf K", n, Xdata.Ndata[n].T); - } - prn_msg(__FILE__, __LINE__, "msg", Date(), - "Latent: %lf Sensible: %lf Rain: %lf NetLong:%lf NetShort: %lf", - Bdata.ql, Bdata.qs, Bdata.qr, Bdata.lw_net, Mdata.iswr - Mdata.rswr); - throw IOException("Runtime error in runSnowpackModel", AT); - } std::cout << " --> time step temporarily reduced to: " << sn_dt << "\n"; } } @@ -1784,16 +2145,21 @@ void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& // The water transport routines must be placed here, otherwise the temperature // and creep solution routines will not pick up the new mesh boolean. 
- watertransport.compTransportMass(Mdata, Bdata.ql, Xdata, Sdata); + double ql = Bdata.ql; // Variable to keep track of how latent heat is used + watertransport.compTransportMass(Mdata, Xdata, Sdata, ql); + +// vapourtransport.compTransportMass(Mdata, ql, Xdata, Sdata); // See if any SUBSURFACE phase changes are occuring due to updated water content (infiltrating rain/melt water in cold snow layers) - if(!alpine3d) - phasechange.compPhaseChange(Xdata, Mdata.date); - else - phasechange.compPhaseChange(Xdata, Mdata.date, false); + if(!coupled_phase_changes && Xdata.getNumberOfElements() != 0) { + if(!alpine3d) + phasechange.compPhaseChange(Xdata, Mdata.date); + else + phasechange.compPhaseChange(Xdata, Mdata.date, false); - // Finalize PhaseChange - phasechange.finalize(Sdata, Xdata, Mdata.date); + // Finalize PhaseChange + phasechange.finalize(Sdata, Xdata, Mdata.date); + } // Compute change of internal energy during last time step (J m-2) Xdata.compSnowpackInternalEnergyChange(sn_dt); @@ -1804,8 +2170,9 @@ void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& // computeSnowTemperatures where the vectors U, dU and dUU are allocated. 
compSnowCreep(Mdata, Xdata); - } catch(const exception&) { + } catch(const exception& e) { // prevent silent failure prn_msg(__FILE__, __LINE__, "err", Mdata.date, "Snowpack computation not completed"); + std::cerr << "[ERROR] Snowpack exception: " << e.what() << std::endl; throw; } @@ -1813,10 +2180,16 @@ void Snowpack::runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& if (combine_elements) { // Check for combining elements - Xdata.combineElements(SnowStation::number_top_elements, reduce_n_elements, 1); + Xdata.combineElements(SnowStation::number_top_elements, reduce_n_elements, 1, comb_thresh_l); // Check for splitting elements if (reduce_n_elements) { - Xdata.splitElements(); + Xdata.splitElements(-1., comb_thresh_l); } } } + +void Snowpack::snowPreparation(const mio::Date& currentDate, SnowStation& Xdata) const +{ + if (techsnow.prepare(currentDate)) + techsnow.preparation(Xdata); +} diff --git a/third_party/snowpack/snowpackCore/Snowpack.h b/third_party/snowpack/snowpackCore/Snowpack.h index 15b5bb17..b3b30d73 100644 --- a/third_party/snowpack/snowpackCore/Snowpack.h +++ b/third_party/snowpack/snowpackCore/Snowpack.h @@ -26,8 +26,12 @@ #ifndef SNOWPACK_H #define SNOWPACK_H -#include "../DataClasses.h" #include "../Meteo.h" +#include "../DataClasses.h" +#include "../SnowDrift.h" +#include "../TechnicalSnow.h" +#include "Metamorphism.h" +#include "PhaseChange.h" #include #include @@ -38,27 +42,49 @@ class Snowpack { - public: + public: Snowpack(const SnowpackConfig& i_cfg); - void runSnowpackModel(CurrentMeteo Mdata, SnowStation& Xdata, double& cumu_precip, + void runSnowpackModel(CurrentMeteo& Mdata, SnowStation& Xdata, double& cumu_precip, BoundCond& Bdata, SurfaceFluxes& Sdata,double mass_erode); + /** + * @brief Perform snow preparation (grooming, etc) on a given snowpack + * @param currentDate the current date, to determine if grooming should be performed + * @param Xdata snowpack to work on + */ + void snowPreparation(const mio::Date& 
currentDate, SnowStation& Xdata) const; + void setUseSoilLayers(const bool& value); - const static double new_snow_albedo, min_ice_content; + const static double min_ice_content; + + double getSnDt() const { return sn_dt;} + + void setSnDt(const double& snDt) { sn_dt = snDt;} - private: /** - * @brief Specifies what kind of boundary condition is to be implemented at the top surface \n - * - 0 : use surface fluxes (NEUMANN_BC) - * - 1 : use prescribed surface temperature (DIRICHLET_BC) + * @brief Specifies what kind of boundary condition is to be implemented at the top surface. + * Either use surface fluxes (NEUMANN_BC) or use a prescribed surface temperature (DIRICHLET_BC) */ enum BoundaryCondition { NEUMANN_BC, DIRICHLET_BC }; - - static void EL_INCID(const size_t &e, int Ie[]); + + double getParameterizedAlbedo(const SnowStation& Xdata, + const CurrentMeteo& Mdata) const; + double getModelAlbedo(const SnowStation& Xdata, CurrentMeteo& Mdata) const; + + protected: + + bool compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xdata, + BoundCond& Bdata, + const bool& ThrowAtNoConvergence); + + BoundaryCondition surfaceCode; + + private: + static void EL_INCID(const int &e, int Ie[]); static void EL_TEMP( const int Ie[], double Te0[], double Tei[], const std::vector &T0, const double Ti[] ); static void EL_RGT_ASSEM(double F[], const int Ie[], const double Fe[]); @@ -79,11 +105,6 @@ class Snowpack { double Se[ N_OF_INCIDENCES ][ N_OF_INCIDENCES ], double Fe[ N_OF_INCIDENCES ]); - double getParameterizedAlbedo(const SnowStation& Xdata, const CurrentMeteo& Mdata) const; - double getModelAlbedo(const SnowStation& Xdata, CurrentMeteo& Mdata) const; - - bool compTemperatureProfile(const CurrentMeteo& Mdata, SnowStation& Xdata, BoundCond& Bdata, const bool& ThrowAtNoConvergence); - void assignSomeFluxes(SnowStation& Xdata, const CurrentMeteo& Mdata, const double& mAlb, SurfaceFluxes& Sdata); @@ -92,16 +113,19 @@ class Snowpack { void fillNewSnowElement(const 
CurrentMeteo& Mdata, const double& length, const double& density, const bool& is_surface_hoar, const unsigned short& number_of_solutes, ElementData &elem); + void compTechnicalSnow(const CurrentMeteo& Mdata, SnowStation& Xdata, double& cumu_precip); + void compSnowFall(const CurrentMeteo& Mdata, SnowStation& Xdata, double& cumu_precip, SurfaceFluxes& Sdata); const SnowpackConfig& cfg; - BoundaryCondition surfaceCode; + + TechSnow techsnow; std::string variant, viscosity_model, watertransportmodel_snow, watertransportmodel_soil; std::string hn_density, hn_density_parameterization; std::string sw_mode, snow_albedo, albedo_parameterization, albedo_average_schmucki, sw_absorption_scheme; Meteo::ATM_STABILITY atm_stability_model; - bool allow_adaptive_timestepping; + double albedo_NIED_av; double albedo_fixedValue, hn_density_fixedValue; double meteo_step_length; double thresh_change_bc, geo_heat, height_of_meteo_values, height_new_elem, sn_dt; @@ -109,25 +133,29 @@ class Snowpack { double new_snow_dd, new_snow_sp, new_snow_dd_wind, new_snow_sp_wind, rh_lowlim, bond_factor_rh; double new_snow_grain_size, new_snow_bond_size; double hoar_density_buried, hoar_density_surf, hoar_min_size_buried; - double minimum_l_element; + double minimum_l_element, comb_thresh_l; double t_surf; - static const double min_snow_albedo; + bool allow_adaptive_timestepping; bool research_mode, useCanopyModel, enforce_measured_snow_heights, detect_grass; bool soil_flux, useSoilLayers; + bool coupled_phase_changes; bool combine_elements, reduce_n_elements, change_bc, meas_tss; bool vw_dendricity; bool enhanced_wind_slab; ///< to use an even stronger wind slab densification than implemented by default bool alpine3d; ///< triggers various tricks for Alpine3D (including reducing the number of warnings) bool ageAlbedo; ///< use the age of snow in the albedo parametrizations? 
default: true + const static double min_allowed_sn_dt; ///< minimum allowed snowpack time step for solving the heat equation const static bool hydrometeor; const static double snowfall_warning; const static unsigned int new_snow_marker; - bool adjust_height_of_meteo_values; + bool adjust_height_of_meteo_values, adjust_height_of_wind_value; bool advective_heat; double heat_begin, heat_end; double temp_index_degree_day, temp_index_swr_factor; bool forestfloor_alb; + bool rime_index, newsnow_lwc, read_dsm; + std::string soil_evaporation, soil_thermal_conductivity; }; //end class Snowpack #endif diff --git a/third_party/snowpack/snowpackCore/Solver.cc b/third_party/snowpack/snowpackCore/Solver.cc index cd8404df..8be98f64 100644 --- a/third_party/snowpack/snowpackCore/Solver.cc +++ b/third_party/snowpack/snowpackCore/Solver.cc @@ -1,26 +1,3 @@ -// -// Canadian Hydrological Model - The Canadian Hydrological Model (CHM) is a novel -// modular unstructured mesh based approach for hydrological modelling -// Copyright (C) 2018 Christopher Marsh -// -// This file is part of Canadian Hydrological Model. -// -// Canadian Hydrological Model is free software: you can redistribute it and/or -// modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Canadian Hydrological Model is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Canadian Hydrological Model. If not, see -// . 
-// - /* GENERAL INFO * AUTHOR: GUIDO SARTORIS ETH ZUERICH */ @@ -30,12 +7,13 @@ */ #include "Solver.h" -#include +#include +#include #include #include +#include #include //for memset -#include -#include +#include //for isnan #ifdef __clang__ #pragma clang diagnostic push @@ -69,35 +47,35 @@ int ComputeTmpConMatrix(SD_CON_MATRIX_DATA *pMat0, SD_TMP_CON_MATRIX_DATA *pMat) int ComputeFillIn(SD_TMP_CON_MATRIX_DATA *pMat); int ComputeBlockMatrix( SD_TMP_CON_MATRIX_DATA *pTmpMat, SD_BLOCK_MATRIX_DATA *pMat); -#define GD_MALLOC( POINTER, TYPE, N, MSG ) \ +#define GD_MALLOC( pointer, TYPE, N, MSG ) \ { \ - POINTER = (TYPE *)malloc( sizeof(TYPE)*(N+1) ); \ - if ( POINTER ) { \ + pointer = (TYPE *)malloc( sizeof(TYPE)*(N+1) ); \ + if ( pointer ) { \ gd_MemErr = false; \ } else { \ gd_MemErr = true; fprintf(stderr, "\n+++++ %s: %s\n", "NO SPACE TO ALLOCATE", MSG); \ } \ } -#define GD_REALLOC( POINTER, TYPE, N, MSG ) \ +#define GD_REALLOC( pointer, TYPE, N, MSG ) \ { \ - if ( POINTER ) { \ - POINTER = (TYPE *)realloc( (char*)POINTER, sizeof(TYPE)*(N+1) ); \ - if ( POINTER ) { \ + if ( pointer ) { \ + pointer = (TYPE *)realloc( (char*)pointer, sizeof(TYPE)*(N+1) ); \ + if ( pointer ) { \ gd_MemErr = false; \ } else { \ gd_MemErr = true; fprintf(stderr, "\n+++++ %s: %s\n", "NO SPACE TO REALLOCATE", MSG); \ } \ } else { \ - GD_MALLOC( POINTER, TYPE, N, MSG ); \ + GD_MALLOC( pointer, TYPE, N, MSG ); \ } \ } -#define GD_FREE( POINTER ) \ +#define GD_FREE( pointer ) \ { \ - if ( POINTER ) { \ - free ( (char*) POINTER ); \ - POINTER = NULL; \ + if ( pointer ) { \ + free ( (char*) pointer ); \ + pointer = NULL; \ } \ } \ @@ -296,8 +274,8 @@ int ComputeBlockMatrix( SD_TMP_CON_MATRIX_DATA *pTmpMat, SD_BLOCK_MATRIX_DATA *p { \ FOUND = 0; \ SD_COL_BLOCK_DATA *pB_ = (pFIRST_BLK); \ - size_t Col0_ = COL+1; \ - size_t Col1_ = COL-1; \ + int Col0_ = COL+1; \ + int Col1_ = COL-1; \ while ( pB_ ) \ { if ( Col1_ > pB_->Col1 ) { ppBLK = &pB_->Next; pB_ = pB_->Next; } \ else if ( Col0_ >= 
pB_->Col0 ) \ @@ -414,7 +392,7 @@ int ComputeBlockMatrix( SD_TMP_CON_MATRIX_DATA *pTmpMat, SD_BLOCK_MATRIX_DATA *p * @param pMat SD_CON_MATRIX_DATA * @return int */ -inline int AllocateConData( size_t Dim, SD_CON_MATRIX_DATA *pMat ) +inline int AllocateConData( int Dim, SD_CON_MATRIX_DATA *pMat ) { pMat->nRow = Dim; GD_MALLOC( pMat->pRow, SD_ROW_DATA, pMat->nRow, "Row Allocation"); @@ -442,7 +420,7 @@ int ReleaseConMatrix( SD_CON_MATRIX_DATA *pMat ) /* * INTERFACE FUNCITONS TO ACCESS THE SOLVER */ -int ds_Initialize(const size_t& MatDim, SD_MATRIX_DATA **ppMat) +int ds_Initialize(const int& MatDim, SD_MATRIX_DATA **ppMat) { SD_MATRIX_DATA *pMat = NULL; @@ -566,8 +544,10 @@ inline int SymbolicFact(SD_MATRIX_DATA *pMat) } // SymbolicFact -int ds_Solve( const SD_MATRIX_WHAT& Code, SD_MATRIX_DATA *pMat, double *X) +bool ds_Solve(const SD_MATRIX_WHAT& Code, SD_MATRIX_DATA *pMat, double *X) { + bool success = true; + // SymbolicFactorize if ( Code & SymbolicFactorize ){ if ( Code & NumericFactorize ){ @@ -603,10 +583,13 @@ int ds_Solve( const SD_MATRIX_WHAT& Code, SD_MATRIX_DATA *pMat, double *X) X[i] = 0.; } Permute( DimTot, pMat->Mat.Block.pPerm, X ); + + // Check for NaN + success = !std::isnan(X[0]); } // ResetMatrixData - if ( Code & ResetMatrixData ){ + if ( Code & ResetMatrixData ){ if ( Code != ResetMatrixData ){ USER_ERROR("You cannot reset the matrix together with other operations"); } @@ -629,27 +612,10 @@ int ds_Solve( const SD_MATRIX_WHAT& Code, SD_MATRIX_DATA *pMat, double *X) } } - return 0; + return success; } /* ds_Solve */ -/** - * @brief This function assemble the element matrix for one element and must be called for each - * (finite) element after the element connectivity have been assembled and the matrix symbolic - * factorized. To perform this task we also newly require the element incidences. The - * variable: Dim specifies the dimension of the matrix: Mat which is not required to be equal - * to the numer of element incidences: nEq. 
- * ATTENTION: This function do not generate a run time error if the specified incidences have - * not been previously defined. - * NOTE: If the matrix has been specified as symmetric we always use only the upper part of - * the element matrix. - * @param [in] pMat0 SD_MATRIX_DATA - * @param [in] nEq int - * @param [in] Eq int - * @param [in] Dim int - * @param [in] ElMat double - * @return int - */ int ds_AssembleMatrix(SD_MATRIX_DATA *pMat0, const int& nEq, int Eq[], const int& Dim, const double *ElMat) { SD_BLOCK_MATRIX_DATA *pMat=NULL; @@ -918,14 +884,15 @@ int InverseMatrixVector( SD_BLOCK_MATRIX_DATA *pMat, double *X ) * @param tag -- int tag value. */ static void MmdUpdate( int ehead, int neqns, int *xadj, int *adjncy, int delta, int *mdeg, - int *head, int *forward, int *backward, int *qsize, int *list, int *marker, int maxint, int *tag) + int *head, int *forward, int *backward, int *qsize, int *list, int *marker, int maxint, int *tag) { - int deg, deg0, element, enode, fnode, i, iq2, istop, - istart, j, jstop, jstart, link, mdeg0, mtag, nabor, - node, q2head, qxhead; + int deg, deg0, element, enode, fnode, i, iq2, istop, + istart, j, jstop, jstart, link, mdeg0, mtag, nabor, + node, q2head, qxhead; - mdeg0 = *mdeg + delta; - element = ehead; + + mdeg0 = *mdeg + delta; + element = ehead; n100: if ( element <= 0 ) { @@ -1160,10 +1127,11 @@ static void MmdUpdate( int ehead, int neqns, int *xadj, int *adjncy, int del static void MmdElimin(int mdeg_node, int *xadj, int *adjncy, int *head, int *forward, int *backward, int *qsize, int *list, int *marker, int maxint, int tag) { - int element, i, istop, istart, j, - jstop, jstart, link, - nabor, node, npv, nqnbrs, nxnode, - pvnode, rlmt, rloc, rnode, xqnbr; + int element, i, istop, istart, j, + jstop, jstart, link, + nabor, node, npv, nqnbrs, nxnode, + pvnode, rlmt, rloc, rnode, xqnbr; + // find the reachable set of 'mdeg_node' and // place it in the data structure. 
@@ -1419,13 +1387,11 @@ static void MmdNumbering(int neqns, int *perm, int *invp, int *qsize, int *nsiz * for marking nodes. * Output parameters -- * @param nsize -- number of supernodes. - * @param perm -- the minimum degree ordering. - * @param invp -- the inverse of perm. + * @param perm -- the minimum degree ordering -- used temporarily for degree backward link. + * @param invp -- the inverse of perm -- used temporarily for degree forward link. * @param ncsub -- an upper bound on the number of nonzero subscripts for the compressed storage scheme. * Working parameters -- * @param head -- vector for head of degree lists. - * @param invp -- used temporarily for degree forward link. - * @param perm -- used temporarily for degree backward link. * @param qsize -- vector for size of supernodes. * @param list -- vector for temporary linked lists. * @param marker -- a temporary marker vector. @@ -1609,7 +1575,7 @@ int ComputeTmpConMatrix(SD_CON_MATRIX_DATA *pMat0, SD_TMP_CON_MATRIX_DATA *pMat) * Set the temporary adjacency block data. For each row block process each single column * coefficients and the diagonal one. */ - for (size_t PermRow = 0; PermRow < pMat->nRow; PermRow++) { + for (int PermRow = 0; PermRow < pMat->nRow; PermRow++) { int Supernode, Row, Found; SD_COL_BLOCK_DATA **ppColBlock, *pColBlock, *pFreeColBlock, *pStartColBlock; @@ -1639,7 +1605,7 @@ int ComputeTmpConMatrix(SD_CON_MATRIX_DATA *pMat0, SD_TMP_CON_MATRIX_DATA *pMat) * the first one. */ for ( SD_COL_DATA *pCol = pRow[Row].Col; pCol; pCol = pCol->Next ) { - const size_t PermCol = pPerm[ SD_COL(pCol) ]; + const int PermCol = pPerm[ SD_COL(pCol) ]; if ( PermRow > PermCol ) { continue; } @@ -1893,21 +1859,10 @@ int ReleaseBlockMatrix( SD_BLOCK_MATRIX_DATA *pMat ) } // ReleaseBlockMatrix -/** -* @brief This function assemble the element connnectivity for one or more elements in order to build -* a sparse matrix format. 
Of course we only store the upper part of the connectivity matrix -* because we only consider structure symmetric matrices. - * @param pMat0 SD_MATRIX_DATA - * @param nEq int - * @param Eq (int []) - * @param nEl int - * @param Dim int - * @return int -*/ int ds_DefineConnectivity(SD_MATRIX_DATA *pMat0, const int& nEq, int Eq[], const int& nEl, const int& Dim ) { int e, i, j; - size_t Row_i; + int Row_i; SD_ROW_DATA *pRow_i; SD_CON_MATRIX_DATA *pMat; @@ -1919,7 +1874,7 @@ int ds_DefineConnectivity(SD_MATRIX_DATA *pMat0, const int& nEq, int Eq[], const pRow_i = &SD_ROW(Row_i, pMat); for (j = 0; j < nEq; j++) { - size_t Col_j, Found; + int Col_j, Found; SD_COL_DATA **ppC, *pCol; Col_j = Eq[j]; if ( Row_i == Col_j ) { @@ -1949,4 +1904,3 @@ int ds_DefineConnectivity(SD_MATRIX_DATA *pMat0, const int& nEq, int Eq[], const /* * End of SymbFact.c */ - diff --git a/third_party/snowpack/snowpackCore/Solver.h b/third_party/snowpack/snowpackCore/Solver.h index 1234124e..0afe5cf8 100644 --- a/third_party/snowpack/snowpackCore/Solver.h +++ b/third_party/snowpack/snowpackCore/Solver.h @@ -23,7 +23,7 @@ #ifndef SOLVER_H #define SOLVER_H -#include //needed for size_t +#include //needed for int /** * @file Solver.h @@ -50,7 +50,7 @@ * March 1989]. * All direct-solver (ds-)functions return 0 = FALSE if succesfull, respectively 1 = TRUE with * an error message on standard output if an error has occurred. 
- * + * * @author GUIDO SARTORIS ETH ZUERICH */ @@ -58,7 +58,7 @@ typedef struct { int nChunks; int pChunksSize; - char **pChunks; + char **pChunks; int TotChunkSize; } SD_CHUNK_DATA; @@ -66,7 +66,7 @@ typedef struct { int Row0; int Row1; - size_t nCol; + int nCol; int nColBlock; int iColBlock; int iFloat; @@ -74,7 +74,7 @@ typedef struct typedef struct { - size_t Dim; + int Dim; int *pPerm; int nRowBlock; SD_ROW_BLOCK_DATA *pRowBlock; @@ -89,7 +89,7 @@ typedef struct typedef struct SD_COL_DATA { - size_t Col; + int Col; struct SD_COL_DATA *Next; } SD_COL_DATA; @@ -100,7 +100,7 @@ typedef struct SD_ROW_DATA typedef struct { - size_t nRow; + int nRow; int *pPerm; int *pPermInv; int nSupernode; @@ -110,12 +110,12 @@ typedef struct SD_CHUNK_DATA PoolCol; SD_COL_DATA *FreeCol; int nFreeCol; - size_t nCol; + int nCol; } SD_CON_MATRIX_DATA; typedef struct SD_COL_BLOCK_DATA { - size_t Col0, Col1; + int Col0, Col1; struct SD_COL_BLOCK_DATA *Next; } SD_COL_BLOCK_DATA; @@ -137,7 +137,7 @@ typedef union typedef struct { - size_t nRow; + int nRow; int *pPerm; int nRowBlock; @@ -166,8 +166,8 @@ typedef struct StateType State; union { SD_CON_MATRIX_DATA Con; - SD_TMP_CON_MATRIX_DATA TmpCon; - SD_BLOCK_MATRIX_DATA Block; + SD_TMP_CON_MATRIX_DATA TmpCon; + SD_BLOCK_MATRIX_DATA Block; } Mat; } SD_MATRIX_DATA; @@ -227,8 +227,13 @@ typedef enum SD_MATRIX_WHAT * clustered together in the vector pVec. */ + /** - * @brief This function is needed for defining the system (matrix) connectivity i.e. the non-zero + * @brief This function assemble the element connnectivity for one or more elements in order to build + * a sparse matrix format. Of course we only store the upper part of the connectivity matrix + * because we only consider structure symmetric matrices. + * + * it is needed for defining the system (matrix) connectivity i.e. the non-zero * coefficients of the matrix [A] i.e. which equation is connected to which one. 
For each * (finite) element we have to specifies a list of equations. Here, we assume that all * equations in the list are connected to eachother and thus lead to non-zero coefficients in @@ -249,7 +254,7 @@ typedef enum SD_MATRIX_WHAT * NOTE: Except the definition of the multiplicity in ds_Initialize(), all steps performed to * define the structure of matrix [A] are stricktly independent from the multiplicity * - * @param [in] pMat0 Pointer to the matrix [A] opaque data returned by ds_Initialize() + * @param [in] pMat0 pointer to the matrix [A] opaque data returned by ds_Initialize() * @param [in] nEq No. of equations for one element forming a crique * @param [in] Eq Element list of equations for more elements with equal no. of eqs. * @param [in] nEl No. of elements ( 0 <= i "<" nEq ; 0 <= e "<" nEl ) @@ -278,11 +283,18 @@ int ds_DefineConnectivity( SD_MATRIX_DATA *const pMat0, const int& nEq, int Eq[] * and unknowns is given by: MatDim * Multiplicity * @param ppMat A pointer to an opaque data type storing data related to the matrix [A] */ -int ds_Initialize( const size_t& MatDim, SD_MATRIX_DATA **ppMat ); +int ds_Initialize( const int& MatDim, SD_MATRIX_DATA **ppMat ); + /** * @brief This function assemble the element square matrix [ElMat] for one element with nEq*M x nEq*M -* real coefficients in to the global matrix [A]. If a multiplicity factor M greather than 1 +* real coefficients in to the global matrix [A]. It must be called for each +* (finite) element after the element connectivity have been assembled and the matrix symbolic +* factorized. To perform this task we also require the element incidences. The +* variable: Dim specifies the dimension of the matrix: Mat which is not required to be equal +* to the numer of element incidences: nEq. +* +* If a multiplicity factor M greather than 1 * has been defined the numerical values in the element matrix [ElMat] must be forall vector * components clustered together. E.g. for a multiplicity of 3 i.e. 
a 3D vector field, the 3x3 * left-upper submatrix of [ElMat] must represent the coupling between the 3 vector field @@ -292,7 +304,9 @@ int ds_Initialize( const size_t& MatDim, SD_MATRIX_DATA **ppMat ); * connectivity has been defined with a call to: ds_DefineConnectivity(). * ATTENTION: no error is detected if some of the the element connectivity defined when * calling ds_AssembleLocalMatrix() are not included in those previously specified when -* calling ds_DefineConnectivity() +* calling ds_DefineConnectivity() (ie. if the specified incidences have +* not been previously defined). +* * If the matrix [A] has been declared as symmetric only the upper triangular part of * [ElMat], i.e. only ElMat[i][j] = ElMat[Dim*i+j] with i<=j and i,j = 0...M*nEq-1 are used * and need to be defined. In the unsymmetric case all M*nEq x M*nEq coefficients are used. It @@ -301,20 +315,22 @@ int ds_Initialize( const size_t& MatDim, SD_MATRIX_DATA **ppMat ); * After all element matrices have been assembled the matrix [A] = [L][U] can be factorised in * to the lower [L] and upper [U] tringular matrices by calling ds_Solve(NumericFactorize, * ). - * @param [in] pMat0 pointer to the matrix [A] opaque data returned by ds_Initialize() - * @param [in] nEq no. of equations for one element forming a crique - * @param [in] Eq Element list of equations for one element. - * @param [in] Dim first dimension of the 2D-array ElMat[][Dim] - * @param [in] ElMat element square matrix to be assembled in the matrix [A] +* @param [in] pMat0 pointer to the matrix [A] opaque data returned by ds_Initialize() +* @param [in] nEq no. of equations for one element forming a crique +* @param [in] Eq Element list of equations for one element. 
+* @param [in] Dim first dimension of the 2D-array ElMat[][Dim] +* @param [in] ElMat element square matrix to be assembled in the matrix [A] */ int ds_AssembleMatrix( SD_MATRIX_DATA *pMat0, const int& nEq, int Eq[], const int& Dim, const double *ElMat ); /** - * @param [in] Code functionlaity code defined above + * @brief This function calls the solver itself + * @param [in] Code functionlity code defined above * @param [in] pMat pointer to the matrix [A] opaque data * @param [in] pX right hand side vector {B} to be overwritten by the solution vector {X}: B[i] := X[i] + * @return false whenever the solve produced NaNs in the solution vector */ -int ds_Solve( const SD_MATRIX_WHAT& Code, SD_MATRIX_DATA *pMat, double *pX ); +bool ds_Solve(const SD_MATRIX_WHAT& Code, SD_MATRIX_DATA *pMat, double *pX); int ReleaseConMatrix( SD_CON_MATRIX_DATA * pMat ); int ReleaseBlockMatrix( SD_BLOCK_MATRIX_DATA * pMat ); diff --git a/third_party/snowpack/snowpackCore/VapourTransport.cc b/third_party/snowpack/snowpackCore/VapourTransport.cc new file mode 100644 index 00000000..dd37a3fe --- /dev/null +++ b/third_party/snowpack/snowpackCore/VapourTransport.cc @@ -0,0 +1,830 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND +*/ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . 
+*/ + + +#include "VapourTransport.h" +#include "../vanGenuchten.h" +#include "Snowpack.h" +#include "../Constants.h" + +// MeteoIO constants +#include + +#include +#include +#include + +//Eigen, note we temporarily disable Effective C++ warnings +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Weffc++" +#pragma GCC diagnostic ignored "-Wctor-dtor-privacy" +#include +#include +#include +#include +#include +#include +#include + +typedef Eigen::Triplet Trip; +#pragma GCC diagnostic pop + +using namespace mio; +using namespace std; +using namespace Eigen; + + +/** + * @page water_vapor_transport Water Vapor Transport + * + */ + +//vapour_transport_implicit_factor: 1 is fully implicit, 0 is fully explicit, 0.5 is Crank-Nicolson +const double VapourTransport::f = 1.; +const double VapourTransport::VapourTransport_timeStep = 60.; // Only used when f < 1 !! + +VapourTransport::VapourTransport(const SnowpackConfig& cfg) + : WaterTransport(cfg), RichardsEquationSolver1d(cfg, false), variant(), + iwatertransportmodel_snow(BUCKET), iwatertransportmodel_soil(BUCKET), watertransportmodel_snow("BUCKET"), watertransportmodel_soil("BUCKET"), + sn_dt(IOUtils::nodata), timeStep(IOUtils::nodata), waterVaporTransport_timeStep(IOUtils::nodata), + hoar_thresh_rh(IOUtils::nodata), hoar_thresh_vw(IOUtils::nodata), hoar_thresh_ta(IOUtils::nodata), + useSoilLayers(false), water_layer(false), enable_vapour_transport(false), + diffusionScalingFactor_(1.0), height_of_meteo_values(0.), adjust_height_of_meteo_values(true), waterVaporTransport_timeStepAdjust(false) +{ + cfg.getValue("VARIANT", "SnowpackAdvanced", variant); + + // Defines whether soil layers are used + cfg.getValue("SNP_SOIL", "Snowpack", useSoilLayers); + + //To build a thin top rain-water layer over a thin top ice layer, rocks, roads etc. 
+ cfg.getValue("WATER_LAYER", "SnowpackAdvanced", water_layer); + + /** + * @brief No surface hoar will form for rH above threshold (1) + * - Original calibration with the 98/99 data set: 0.9 + * - r141: HOAR_THRESH_RH set to 0.9 + * - r719: HOAR_THRESH_RH set to 0.97 + */ + cfg.getValue("HOAR_THRESH_RH", "SnowpackAdvanced", hoar_thresh_rh); + + /** + * @brief No surface hoar will form at wind speeds above threshold (m s-1) + * - Original calibration with the 98/99 data set: 3.5 + * - r141: HOAR_THRESH_VW set to 3.0 + * - r242: HOAR_THRESH_VW set to 3.5 + */ + cfg.getValue("HOAR_THRESH_VW", "SnowpackAdvanced", hoar_thresh_vw); + + /** + * @brief No surface hoar will form at air temperatures above threshold (m s-1) + * - Originaly, using THRESH_RAIN + * - r787: HOAR_THRESH_TA set to 1.2 + */ + cfg.getValue("HOAR_THRESH_TA", "SnowpackAdvanced", hoar_thresh_ta); + + //Calculation time step in seconds as derived from CALCULATION_STEP_LENGTH + const double calculation_step_length = cfg.get("CALCULATION_STEP_LENGTH", "Snowpack"); + sn_dt = M_TO_S(calculation_step_length); + + //Vapour transport settings + cfg.getValue("ENABLE_VAPOUR_TRANSPORT", "SnowpackAdvanced", enable_vapour_transport); + if (enable_vapour_transport) { + // the water vapor subtime step + // If not using fully implicit scheme + if (f < 1.0) { + waterVaporTransport_timeStepAdjust = true; + waterVaporTransport_timeStep = std::min(sn_dt, VapourTransport_timeStep); + } else { + // Implicit: time step defaults to SNOWPACK time step + waterVaporTransport_timeStep = sn_dt; + } + } + + // Water transport model snow + cfg.getValue("WATERTRANSPORTMODEL_SNOW", "SnowpackAdvanced", watertransportmodel_snow); + iwatertransportmodel_snow = UNDEFINED; + if (watertransportmodel_snow == "BUCKET") { + iwatertransportmodel_snow = BUCKET; + } else if (watertransportmodel_snow == "NIED") { + iwatertransportmodel_snow = NIED; + } else if (watertransportmodel_snow == "RICHARDSEQUATION") { + iwatertransportmodel_snow = 
RICHARDSEQUATION; + } + + // Water transport model soil + cfg.getValue("WATERTRANSPORTMODEL_SOIL", "SnowpackAdvanced", watertransportmodel_soil); + iwatertransportmodel_soil = UNDEFINED; + if (watertransportmodel_soil == "BUCKET") { + iwatertransportmodel_soil = BUCKET; + } else if (watertransportmodel_soil == "NIED") { + iwatertransportmodel_soil = NIED; + } else if (watertransportmodel_soil == "RICHARDSEQUATION") { + iwatertransportmodel_soil = RICHARDSEQUATION; + } + + cfg.getValue("HEIGHT_OF_METEO_VALUES", "Snowpack", height_of_meteo_values); + cfg.getValue("ADJUST_HEIGHT_OF_METEO_VALUES", "SnowpackAdvanced", adjust_height_of_meteo_values); +} + +/** + * @brief The mass transport procedure, which serves as the primary function, is invoked from Snowpack::runSnowpackModel. \n + * NOTES: + * -# It is worth noting that the solver is highly stable with default parameters. Specifically, VAPOUR_TRANSPORT_TIMESTEP is set to 60 seconds, + * while the SNOWPACK simulation time step is 15 minutes, and VAPOUR_TRANSPORT_IMPLICIT_FACTOR is set to 1. The latter factor determines whether + * the equation is discretized in full implicit, full explicit, or a combination of the two, with a value of 1 indicating full implicit, + * and a value of 0.5 indicating Crank-Nicolson. In the case of convergence issues, reducing the height of the new-snow element controlled + * by HEIGHT_NEW_ELEM (in the .ini config file) is recommended. For sea-ice simulations, choosing BUCKET for the water transport scheme is advised + * if convergence issues arise. \n + * -# If there is no soil or snow present, vapor transport will be bypassed. + * -# If vapor transport enabled, ql is only used in vaportransport for mass tranport on top. See WaterTransport::compTransportMass. 
\n + * @author Mahdi Jafari + * @param Xdata + * @param ql Latent heat flux (W m-2) + * @param Sdata + * @param Mdata + */ +void VapourTransport::compTransportMass(const CurrentMeteo& Mdata, double& ql, + SnowStation& Xdata, SurfaceFluxes& Sdata) +{ + + // First, consider no soil with no snow on the ground + if (!useSoilLayers && Xdata.getNumberOfNodes() == Xdata.SoilNode+1) { + return; + } + + try { + LayerToLayer(Mdata, Xdata, Sdata, ql); + WaterTransport::adjustDensity(Xdata); + } catch(const exception&) { + prn_msg( __FILE__, __LINE__, "err", Mdata.date, "Error in transportVapourMass()"); + throw; + } +} + + +/** + * @brief This function prepares everything to solve the transient-diffusive vapor transport with phase change: \n + * NOTES: + * -# Initially, the model employs the complete latent heat flux (ql) to alter the mass of the uppermost components. + * It is crucial to note that direct usage of this mass flux as the top boundary condition for the transient-diffusive + * vapor transport solver is not feasible, both theoretically and practically. This is primarily due to + * ql's turbulent nature as a latent heat flux, while the equation lacks a convection term. + * -# It calculates water vapor diffusivity and mass tranfer coefficient. + * -# When selecting the Explicit method, sub time steps are computed to ensure a stable solution. \n + * The method then integrates compDensityProfile to complete the full SNOWPACK time step, typically set at 15 minutes. \n + * Finally, mass is explicitly added or subtracted for each element, while adhering to specific constraints. 
\n + * -# For more information, please check the main reference as: DOI={10.3389/feart.2020.00249} + * @author Mahdi Jafari + * @param Xdata + * @param ql Latent heat flux (W m-2) + * @param Sdata + * @param Mdata + */ +void VapourTransport::LayerToLayer(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata, double& ql) +{ + // First consider surface sublimation + if (Xdata.getNumberOfElements() == 0) return; + compSurfaceSublimation(Mdata, ql, Xdata, Sdata); + + const size_t nN = Xdata.getNumberOfNodes(); + size_t nE = nN-1; + vector& NDS = Xdata.Ndata; + vector& EMS = Xdata.Edata; + std::vector deltaM(nE, 0.); // calculate the limited layer mass change + + if (!enable_vapour_transport) { + // Only deal with the remaining ql (i.e., latent heat exchange at the surface) + const double topFlux = -ql / Constants::lh_sublimation; //top layer flux (kg m-2 s-1) + const double dM = std::max(-EMS[nE-1].theta[ICE] * (Constants::density_ice * EMS[nE-1].L), -(topFlux * sn_dt)); + // Correct latent heat flux, which should become 0. at this point. HACK: note that if we cannot satisfy the ql at this point, we overestimated the latent heat from soil. + // We will not get mass from deeper layers, as to do that, one should work with enable_vapour_transport == true. + ql -= dM / sn_dt * Constants::lh_sublimation; + deltaM[nE-1] += dM; + } else { + ql=0; + + size_t e = nE; + std::vector totalMassChange(nE, 0.); // store the total mass change + std::vector oldVaporDenNode(nN, 0.); // old water vapor density for node + + std::vector factor_(nE, 1.); // this is for source term in vapor transport equation + for (size_t i = 0; i < Xdata.SoilNode; i++) { + factor_[i] = 0.; + } + + std::vector D_(nE, 0.); + for (size_t i = 0; i <= nE-1; i++) { + double theta_air = std::max(EMS[i].theta[AIR], 0.0); + double tortuosity = pow(theta_air, 7./3.) 
/ pow(1-EMS[i].theta[SOIL], 2.); + double D_vapSoil = tortuosity * theta_air * Constants::diffusion_coefficient_in_air; + + // based on Foslien (1994) + double Dsnow = EMS[i].theta[ICE] * theta_air * Constants::diffusion_coefficient_in_air + + theta_air * Constants::diffusion_coefficient_in_air + / (EMS[i].theta[ICE] * Constants::conductivity_air + / Constants::conductivity_ice + EMS[i].theta[ICE] + * Constants::lh_sublimation * Constants::diffusion_coefficient_in_air + * dRhov_dT(EMS[i].Te) / Constants::conductivity_ice + + theta_air); + D_[i] = factor_[i] * Dsnow + (1.0 - factor_[i]) * D_vapSoil; + } + + std::vector hm_(nN, 0.); // mass transfer coefficient m s-1 + + for (size_t i = 0; i < nN; i++) { + double saturationDensity; + saturationDensity = Atmosphere::waterVaporDensity(NDS[i].T, Atmosphere::vaporSaturationPressure(NDS[i].T)); + hm_[i] = Constants::density_ice / saturationDensity / 9.7e9; // hm_experimental, Pirmin 2012, M_mm=as_all*hm_experimental*(rhov_sat-rhov) + } + + std::vector as_(nN, 0.); // the specific surface area m-1 + for (size_t i=0; i 0 && i < Xdata.SoilNode) { + double rwTors_u = pow((EMS[i].theta[WATER] + EMS[i].theta[ICE]) / EMS[i].theta[SOIL] + 1., 1. / 3.); + double rwTors_d = pow((EMS[i-1].theta[WATER] + EMS[i-1].theta[ICE]) / EMS[i-1].theta[SOIL] + 1., 1. 
/ 3.); + double apparentTheta = 0.5 * (EMS[i].theta[SOIL] + EMS[i].theta[ICE] + EMS[i].theta[WATER]) + 0.5 * (EMS[i-1].theta[SOIL] + EMS[i-1].theta[ICE] + EMS[i-1].theta[WATER]); + as_[i] = 6.0 * apparentTheta / (0.5 * 0.002 * rwTors_d * EMS[i-1].rg + 0.5 * 0.002 * rwTors_u * EMS[i].rg); + } else if (i == Xdata.SoilNode && Xdata.SoilNode == nN-1) { + double rwTors_d = pow((EMS[i-1].theta[WATER] + EMS[i-1].theta[ICE]) / EMS[i-1].theta[SOIL] + 1., 1./3.); + double apparentTheta = EMS[i-1].theta[SOIL] + EMS[i-1].theta[ICE] + EMS[i-1].theta[WATER]; + as_[i] = 6.0 * apparentTheta / (0.002 * rwTors_d * EMS[i-1].rg); + } else if (i == Xdata.SoilNode && Xdata.SoilNode < nN-1) { + double rwTori_u = pow(EMS[i].theta[WATER] / EMS[i].theta[ICE] + 1., 1. / 3.); + double rwTors_d = pow((EMS[i-1].theta[WATER] + EMS[i-1].theta[ICE]) / EMS[i-1].theta[SOIL] + 1., 1. / 3.); + double apparentTheta = 0.5 * (EMS[i].theta[ICE] + EMS[i].theta[WATER]) + 0.5 * (EMS[i-1].theta[SOIL] + EMS[i-1].theta[ICE] + EMS[i-1].theta[WATER]); + as_[i] = 6.0 * apparentTheta / (0.5 * 0.002 * rwTors_d * EMS[i-1].rg + 0.5 * 0.001 * rwTori_u * EMS[i].ogs); + } else if (i > Xdata.SoilNode && i < nN-1) { + double rwTori_u = pow(EMS[i].theta[WATER] / EMS[i].theta[ICE] + 1., 1. / 3.); + double rwTori_d = pow(EMS[i-1].theta[WATER] / EMS[i-1].theta[ICE] + 1., 1. / 3.); + double apparentTheta = 0.5 * (EMS[i].theta[ICE] + EMS[i].theta[WATER]) + 0.5 * (EMS[i-1].theta[ICE] + EMS[i-1].theta[WATER]); + as_[i] = 6.0 * apparentTheta / (0.5 * 0.001 * rwTori_d * EMS[i-1].ogs + 0.5 * 0.001 * rwTori_u * EMS[i].ogs); + } else { // i==nN-1 + double rwTori_d = pow(EMS[i-1].theta[WATER] / EMS[i-1].theta[ICE] + 1., 1. 
/ 3.); + double apparentTheta = EMS[i-1].theta[ICE] + EMS[i-1].theta[WATER]; + as_[i] = 6.0 * apparentTheta / (0.001 * rwTori_d * EMS[i-1].ogs); + } + } + + double min_dt = sn_dt; // first guess for the required minimum time step is the SNOWPACK time step + for (size_t i=Xdata.SoilNode; i(l) <= nTime; l++) { + time=time+timeStep; + if (time >= sn_dt) { + timeStep = sn_dt - (time - timeStep); + time = sn_dt; + } + + if (!compDensityProfile(Mdata, Xdata, hm_, as_, D_, oldVaporDenNode)) break; + + for (size_t i = 0; i <= nE-1; i++) { + double saturationVaporUp = Atmosphere::waterVaporDensity(NDS[i+1].T, Atmosphere::vaporSaturationPressure(NDS[i+1].T)); + double saturationVaporDown = Atmosphere::waterVaporDensity(NDS[i].T, Atmosphere::vaporSaturationPressure(NDS[i].T)); + double diffRhov_hm_as_Up = (f * NDS[i+1].rhov + (1 - f) * oldVaporDenNode[i+1] - saturationVaporUp) * hm_[i+1] * as_[i+1]; + double diffRhov_hm_as_Down = (f * NDS[i].rhov + (1 - f) * oldVaporDenNode[i] - saturationVaporDown) * hm_[i] * as_[i]; + totalMassChange[i] = (0.5 * diffRhov_hm_as_Down + 0.5 * diffRhov_hm_as_Up) * timeStep * EMS[i].L; //total mass change, (kg m-2 ) + } + + e = nE; + // consider the mass change due to vapour transport in snow/soil + while (e-- > 0) { + const double massPhaseChange = totalMassChange[e]+deltaM[e]; + + double dM = 0.; // mass change induced by vapor flux (kg m-2) + + // Now, the mass change is limited by: + // - we cannot remove more WATER and ICE than available + // - we cannot add more WATER and ICE than pore space available + if ( EMS[e].theta[SOIL] < Constants::eps ) { // there is no soil in element to keep element not to merge + dM = std::max(-((EMS[e].theta[WATER] - EMS[e].VG.theta_r * (1. 
+ Constants::eps)) * Constants::density_water * EMS[e].L + (EMS[e].theta[ICE] - Snowpack::min_ice_content) * Constants::density_ice * EMS[e].L), + std::min((EMS[e].theta[AIR] * Constants::density_ice * EMS[e].L), massPhaseChange) + ); // mass change due to difference in water vapor flux (kg m-2), at most can fill the pore space. + } else { + dM = std::max(-((EMS[e].theta[WATER] - EMS[e].VG.theta_r * (1. + Constants::eps)) * Constants::density_water * EMS[e].L + EMS[e].theta[ICE] * Constants::density_ice * EMS[e].L), + std::min((EMS[e].theta[AIR] * Constants::density_ice * EMS[e].L), massPhaseChange) + ); // mass change due to difference in water vapor flux (kg m-2), at most can fill the pore space. + } + + + // If there is no pore space, or, in fact, only so much pore space to accomodate the larger volume occupied by ice when all water freezes, + // we inhibit vapour flux. This is necessary to maintain saturated conditions when present, and this is in turn necessary for the stability in the Richards equation solver. + if (EMS[e].theta[AIR] < EMS[e].theta[WATER] * (Constants::density_water / Constants::density_ice - 1.) + Constants::eps) { + dM = 0.; + } + + deltaM[e] = dM; + } + + if (time==sn_dt) break; + } + + for (size_t i = 0; i < nE; i++) { + EMS[i].vapTrans_fluxDiff = -D_[i] * (NDS[i+1].rhov-NDS[i].rhov) / EMS[i].L; + } + + double dHoar = 0.; + for (e = 0; e < nE; e++) { + EMS[e].Qmm = 0.0; + + if (deltaM[e] < 0.) { + // Mass loss: apply mass change first to water, then to ice, based on energy considerations + // We can only do this partitioning here in this "simple" way, without checking if the mass is available, because we already limited dM above, based on available ICE + WATER. + const double dTh_water = std::max((EMS[e].VG.theta_r * (1. 
+ Constants::eps) - EMS[e].theta[WATER]), + deltaM[e] / (Constants::density_water * EMS[e].L)); + const double dTh_ice = ( deltaM[e] - (dTh_water * Constants::density_water * EMS[e].L) ) / (Constants::density_ice * EMS[e].L); + EMS[e].theta[WATER] += dTh_water; + EMS[e].theta[ICE] += dTh_ice; + + Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += dTh_water * Constants::density_water * EMS[e].L; + Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += dTh_ice * Constants::density_ice * EMS[e].L; + EMS[e].M += dTh_water * Constants::density_water * EMS[e].L+dTh_ice * Constants::density_ice * EMS[e].L; + assert(EMS[e].M >= (-Constants::eps2)); // mass must be positive + + EMS[e].Qmm += (dTh_water * Constants::density_water * Constants::lh_vaporization + + dTh_ice * Constants::density_ice * Constants::lh_sublimation + ) / sn_dt; // [w/m^3] + + // If present at surface, surface hoar is sublimated away + if (e == nE-1 && deltaM[e]<0) { + dHoar = std::max(-NDS[nN-1].hoar, deltaM[e]); + } + } else { // Mass gain: add water in case temperature at or above melting point, ice otherwise + if (EMS[e].Te >= EMS[e].meltfreeze_tk) { + EMS[e].theta[WATER] += deltaM[e] / (Constants::density_water * EMS[e].L); + EMS[e].Qmm += (deltaM[e]*Constants::lh_vaporization)/sn_dt/EMS[e].L; // [w/m^3] + Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += deltaM[e]; + } else { + EMS[e].theta[ICE] += deltaM[e] / (Constants::density_ice * EMS[e].L); + EMS[e].Qmm += (deltaM[e]*Constants::lh_sublimation)/sn_dt/EMS[e].L; // [w/m^3] + Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += deltaM[e]; + } + EMS[e].M += deltaM[e]; + assert(EMS[e].M >= (-Constants::eps2)); // mass must be positive + } + + + EMS[e].theta[AIR] = std::max(1. 
- EMS[e].theta[WATER] - EMS[e].theta[WATER_PREF] - EMS[e].theta[ICE] - EMS[e].theta[SOIL], 0.); + if (std::fabs(EMS[e].theta[AIR]) < 1.e-15) { + EMS[e].theta[AIR] = 0; + } + EMS[e].updDensity(); + assert(EMS[e].Rho > 0 || EMS[e].Rho == IOUtils::nodata); // density must be positive + if (!(EMS[e].Rho > Constants::eps && EMS[e].theta[AIR] >= 0. && EMS[e].theta[WATER] <= 1. + Constants::eps && EMS[e].theta[ICE] <= 1. + Constants::eps)) { + prn_msg(__FILE__, __LINE__, "err", Date(), + "Volume contents: e=%d nE=%d rho=%lf ice=%lf wat=%lf wat_pref=%lf soil=%lf air=%le", e, nE, EMS[e].Rho, EMS[e].theta[ICE], + EMS[e].theta[WATER], EMS[e].theta[WATER_PREF], EMS[e].theta[SOIL], EMS[e].theta[AIR]); + throw IOException("Cannot evaluate mass balance in vapour transport LayerToLayer routine", AT); + } + + // some useful output in case of vapor transport + double sVaporDown = Atmosphere::waterVaporDensity(NDS[e].T, Atmosphere::vaporSaturationPressure(NDS[e].T)); + double sVaporUp = Atmosphere::waterVaporDensity(NDS[e+1].T, Atmosphere::vaporSaturationPressure(NDS[e+1].T)); + EMS[e].vapTrans_underSaturationDegree = (0.5*(NDS[e].rhov-sVaporDown)+0.5*(NDS[e+1].rhov-sVaporUp))/(0.5*sVaporDown+0.5*sVaporUp); + EMS[e].vapTrans_cumulativeDenChange += deltaM[e]/EMS[e].L; + EMS[e].vapTrans_snowDenChangeRate = deltaM[e]/EMS[e].L/sn_dt; + } + + Sdata.hoar += dHoar; + NDS[nN-1].hoar += dHoar; + if (NDS[nN-1].hoar < 0.) { + NDS[nN-1].hoar = 0.; + } + + } +} + +/** + * @brief Calculate the surface sublimation / deposition (i.e., only gas-solid). \n + * The fraction of the latent heat flux ql that has not been used so far will be used for + * sublimation/deposition. If positive (and above a certain cutoff level) then there + * is a possibility that surface hoar crystal have grown. Of course, if negative + * then we are also loosing mass from the surface.\n + * This function additionally takes care of surface hoar formation and destruction. 
+ * Note that surface hoar is a nodal property, altough the corresponding mass is carried + * by the underlying element. + * @param *Mdata + * @param ql Latent heat flux (W m-2) + * @param *Xdata + * @param *Sdata + */ +void VapourTransport::compSurfaceSublimation(const CurrentMeteo& Mdata, double& ql, SnowStation& Xdata, SurfaceFluxes& Sdata) +{ + double dM, M; // Length and mass changes, and Initial mass and volumetric content (water or ice) + double dHoar = 0.; // Actual change in hoar mass + double cH_old; // Temporary variable to hold height of snow + + const size_t nN = Xdata.getNumberOfNodes(); + size_t nE = nN-1; + vector& NDS = Xdata.Ndata; + vector& EMS = Xdata.Edata; + const double Tss = NDS[nE].T; // Surface Temperature + + /* + * If ql > 0: + * Surface hoar is formed when surface temperature is below freezing. + * If no surface hoar can be formed, ql is kept and is used as boundary condition + * when calculating vapour flux. + * If there are elements and ql < 0: + * If ql is large enough to remove full surface elements, remove them. + * left over ql is used as boundary condition when calculating vapour flux. + * + * In both cases: add/subtract mass to MS_SUBLIMATION + */ + if (ql > Constants::eps2) { // Add Mass + const double meltfreeze_tk = (Xdata.getNumberOfElements()>0)? 
Xdata.Edata[Xdata.getNumberOfElements()-1].meltfreeze_tk : Constants::meltfreeze_tk; + if (Tss < meltfreeze_tk) { // Add Ice + dM = ql*sn_dt/Constants::lh_sublimation; + // If rh is very close to 1, vw too high or ta too high, surface hoar is destroyed and should not be formed + if (!((Mdata.rh > hoar_thresh_rh) || (Mdata.vw > hoar_thresh_vw) || (Mdata.ta >= IOUtils::C_TO_K(hoar_thresh_ta)))) { + // Under these conditions, form surface hoar + ql = 0.; + Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += dM; + dHoar = dM; + + // In this case adjust properties of element, keeping snow density constant + const double L_top = EMS[nE-1].L; + const double theta_i0 = EMS[nE-1].theta[ICE]; + double dL = dM/(EMS[nE-1].Rho); // length change + if (nE == Xdata.SoilNode) { + dL = 0.; + dM = std::min(dM,EMS[nE-1].theta[AIR]*(Constants::density_ice*EMS[nE-1].L)); + } + NDS[nE].z += dL + NDS[nE].u; NDS[nE].u = 0.0; + EMS[nE-1].L0 = EMS[nE-1].L = L_top + dL; + EMS[nE-1].E = EMS[nE-1].Eps = EMS[nE-1].dEps = EMS[nE-1].Eps_e = EMS[nE-1].Eps_v = EMS[nE-1].S = 0.0; + EMS[nE-1].theta[ICE] *= L_top/EMS[nE-1].L; + EMS[nE-1].theta[ICE] += dM/(Constants::density_ice*EMS[nE-1].L); + EMS[nE-1].theta[ICE] = std::max(0., std::min(1., EMS[nE-1].theta[ICE])); + EMS[nE-1].theta[WATER] *= L_top/EMS[nE-1].L; + EMS[nE-1].theta[WATER] = std::max(0., std::min(1., EMS[nE-1].theta[WATER])); + EMS[nE-1].theta[WATER_PREF] *= L_top/EMS[nE-1].L; + EMS[nE-1].theta[WATER_PREF] = std::max(0., std::min(1., EMS[nE-1].theta[WATER_PREF])); + + for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { + EMS[nE-1].conc[ICE][ii] *= L_top*theta_i0/(EMS[nE-1].theta[ICE]*EMS[nE-1].L); + } + + EMS[nE-1].M += dM; + assert(EMS[nE-1].M >= (-Constants::eps2)); //mass must be positive + + // Update remaining volumetric contents and density + EMS[nE-1].theta[AIR] = std::max(0., 1.0 - EMS[nE-1].theta[WATER] - EMS[nE-1].theta[WATER_PREF] - EMS[nE-1].theta[ICE] - EMS[nE-1].theta[SOIL]); + EMS[nE-1].updDensity(); + } + } + } else if ((ql 
< (-Constants::eps2)) && (nE > 0)) { + // If ql < 0, SUBLIMATE mass off + std::vector M_Solutes(Xdata.number_of_solutes, 0.); // Mass of solutes from disappearing phases + size_t e = nE; + while ((e > 0) && (ql < (-Constants::eps2))) { // While energy is available + e--; + /* + * Determine the amount of potential sublimation and collect some variables + * that will be continuously used: L0 and M + */ + const double L0 = EMS[e].L; + const double theta_i0 = EMS[e].theta[ICE]; + + M = theta_i0 * Constants::density_ice * L0; + dM = ql * sn_dt / Constants::lh_sublimation; + + if (-dM > M) { + // Only if mass change is sufficient to remove the full element + dM = -M; + // Add solutes to Storage + for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { + M_Solutes[ii] += EMS[e].conc[ICE][ii]*theta_i0*L0; + } + EMS[e].theta[ICE] = 0.; + + EMS[e].M += dM; + Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += dM; + ql -= dM*Constants::lh_sublimation/sn_dt; // Update the energy used + + // If present at surface, surface hoar is sublimated away + if (e == nE-1) { + dHoar = std::max(-NDS[nN-1].hoar, dM); + } + + // Update remaining volumetric contents and density + EMS[e].theta[AIR] = std::max(0., 1.0 - EMS[e].theta[WATER] - EMS[e].theta[WATER_PREF] - EMS[e].theta[ICE] - EMS[e].theta[SOIL]); + EMS[e].updDensity(); + // Merge the element if it is a snow layer. This will take care of possible left over liquid water (will be put one layer down) + // Keep layer if it is a soil layer inside the snowpack (for example with snow farming) + if (e >= Xdata.SoilNode) { + if (EMS[e].theta[SOIL] < Constants::eps) { + if (e > 0) SnowStation::mergeElements(EMS[e-1], EMS[e], false, true); + // Now reduce the number of elements by one. + nE--; + } + //In case e==Xdata.SoilNode, we removed the last snow element and we should break out of the loop. + if (e == Xdata.SoilNode) break; + } + } else { + // Not enough energy anymore to remove complete element, so we should break out of the loop. 
+ break; + } + + //check that thetas and densities are consistent + assert(EMS[e].theta[SOIL] >= (-Constants::eps2) && EMS[e].theta[SOIL] <= (1.+Constants::eps2)); + assert(EMS[e].theta[ICE] >= (-Constants::eps2) && EMS[e].theta[ICE]<=(1.+Constants::eps2)); + assert(EMS[e].theta[WATER] >= (-Constants::eps2) && EMS[e].theta[WATER]<=(1.+Constants::eps2)); + assert(EMS[e].theta[WATER_PREF] >= (-Constants::eps2) && EMS[e].theta[WATER_PREF]<=(1.+Constants::eps2)); + assert(EMS[e].theta[AIR] >= (-Constants::eps2) && EMS[e].theta[AIR]<=(1.+Constants::eps2)); + assert(EMS[e].Rho >= (-Constants::eps2) || EMS[e].Rho==IOUtils::nodata); //we want positive density + } + + // Now take care of left over solute mass. + if (nE == Xdata.SoilNode) { // Add Solute Mass to Runoff TODO HACK CHECK + for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { + Sdata.load[ii] += M_Solutes[ii]/S_TO_H(sn_dt); + } + } else { // Add Solute Mass to Element below + if (EMS[e].theta[WATER] > 0.) { + for(size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { + EMS[e].conc[WATER][ii] += M_Solutes[ii]/EMS[e].theta[WATER]/EMS[e].L; + } + } else if (EMS[e].theta[ICE] > 0.) { + for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { + EMS[e].conc[ICE][ii] += M_Solutes[ii]/EMS[e].theta[ICE]/EMS[e].L; + } + } else { + for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { + EMS[e].conc[SOIL][ii] += M_Solutes[ii]/EMS[e].theta[SOIL]/EMS[e].L; + } + } + } + Xdata.reduceNumberOfElements(nE); + } + + // HACK: this code is under verification. The comment reads "surface hoar *is* destroyed, but the next line says surface hoar *may be* destroyed, depending on the sign of the latent heat flux. 
+ // If the code is correct, we can delete this part, if the comment is correct, we should modify the code to read: hoar = -NDS[nE].hoar; + // Check for surface hoar destruction or formation (once upon a time ml_sn_SurfaceHoar) + /*if ((Mdata.rh > hoar_thresh_rh) || (Mdata.vw > hoar_thresh_vw) || (Mdata.ta >= IOUtils::C_TO_K(hoar_thresh_ta))) { + //if rh is very close to 1, vw too high or ta too high, surface hoar is destroyed + hoar = std::min(hoar, 0.); + }*/ + if (Xdata.getNumberOfElements() == 0) return; + + Sdata.hoar += dHoar; + NDS[nN-1].hoar += dHoar; + if (NDS[nN-1].hoar < 0.) { + NDS[nN-1].hoar = 0.; + } + + // Surface hoar cannot exist when the top element is wet + if (nE > 0) { + const double theta_r = ((iwatertransportmodel_snow == RICHARDSEQUATION && nE-1>=Xdata.SoilNode) || (iwatertransportmodel_soil==RICHARDSEQUATION && nE-1 theta_r) { + NDS[nE].hoar = 0.; + } + } + + // At the end also update the overall height + cH_old = Xdata.cH; + Xdata.cH = NDS[Xdata.getNumberOfNodes()-1].z + NDS[Xdata.getNumberOfNodes()-1].u; + if (Xdata.mH!=Constants::undefined) Xdata.mH -= std::min(Xdata.mH - Xdata.Ground, (cH_old - Xdata.cH)); // TODO/HACK: why is this correction for Xdata.mH necessary? +} + +/** + * @brief This function is the solver for discretized transient-diffusive vapor tranport equation. + * NOTES: + * -# Note, for the case of only snow (no soil), bottomDirichletBCtype is set to Drichlet ans Neumann does not make sense \n + * -# The system of equations forms a tridiagonal sparse matrix for which the sparse solvers from the Eigen C++ library are used. \n + * Here, we used quite well stabel solver as BiCGSTAB. Feel free to use other solvers by looking at Eigen documentaion. + * -# When selecting the Explicit method, sub time steps are computed to ensure a stable solution. \n + * The method then integrates compDensityProfile to complete the full SNOWPACK time step, typically set at 15 minutes. 
\n + * Finally, mass is explicitly added or subtracted for each element, while adhering to specific constraints. \n + * @author Mahdi Jafari + * @param Xdata + * @param Mdata + * @param D_el, water vapor diffusivity (m2 s-1) + * @param hm_, mass tranfer coefficient (m s-1) + * @param as_, specific surface area (m-1) + * @param oldVaporDenNode, old vapor denisty stored for nodes + * @return a flag to check if the solution has converged + */ +bool VapourTransport::compDensityProfile(const CurrentMeteo& Mdata, SnowStation& Xdata, + std::vector& hm_, + std::vector& as_, + const std::vector& D_el, + std::vector& oldVaporDenNode) +{ + const bool bottomDirichletBCtype = (Xdata.SoilNode == 0 && variant != "SEAICE") ? (true) : (false); + + const size_t nN = Xdata.getNumberOfNodes(); + size_t nE = nN-1; + vector& NDS = Xdata.Ndata; + vector& EMS = Xdata.Edata; + const size_t nX = nN; // number of unknowns + + BiCGSTAB > solver; // Built-in iterative solver + + SparseMatrix A(nX,nX); + std::vector tripletList(nX); + VectorXd b(nX); + VectorXd xx(nX); + + // grid + std::vector z(nN,0.); + for (size_t i = 0; i < nN; i++) { + z[i] = NDS[i].z; + } + + // initial values + std::vector D_(nN, Constants::diffusion_coefficient_in_air); + for(size_t i=0; i eps_(nN, 1.0); + for(size_t i=0; i(k), static_cast(k), v_ij)); // Set up the matrix diagonal + + v_ij = f * -2.0 * eps_[k] * D_[k] / dz_u / (dz_u + dz_d); + tripletList.push_back(Trip(static_cast(k), static_cast(k) + 1, v_ij)); // Set up the matrix upper diagonals, k+1 + + v_ij = f * -2.0 * eps_[k-1] * D_[k-1] / dz_d / (dz_u + dz_d); + tripletList.push_back(Trip(static_cast(k), static_cast(k) - 1, v_ij)); // Set up the matrix lower diagonals, k-1 + } if (k == nN-1) { + // Normal top B.C. 
assuming satuarion condition for the uppermost node of snowpack + b[k] = saturationDensity; + v_ij = 1.0; + tripletList.push_back(Trip(static_cast(k), static_cast(k), v_ij)); // Set up the matrix diagonal + } if (k == 0) { + if (bottomDirichletBCtype) { + b[k] = saturationDensity; // NDS[k].rhov; + v_ij = 1.0; + tripletList.push_back(Trip(static_cast(k), static_cast(k), v_ij)); // Set up the matrix diagonal + } else { + b[k] = 0.0; + v_ij = -1.0; + tripletList.push_back(Trip(static_cast(k), static_cast(k), v_ij)); // Set up the matrix diagonal + + v_ij = 1.0; + tripletList.push_back(Trip(static_cast(k), static_cast(k) + 1, v_ij));// Set up the matrix upper diagonals, k+1 + } + } + } + + A.setFromTriplets(tripletList.begin(), tripletList.end()); + tripletList.clear(); + A.makeCompressed(); + + solver.compute(A); + if (solver.info() != Success) { + std::ostringstream err_msg; + err_msg << "Error computing 'A' with Eigen: " << Mdata.date << solver.info(); + throw mio::IOException(err_msg.str(), AT); + } + + // Solve the equation + xx = solver.solve(b); + if (solver.info() != Success) { + std::ostringstream err_msg; + err_msg << "Error solving 'b' with Eigen: " << Mdata.date << solver.info(); + throw mio::IOException(err_msg.str(), AT); + } + + for (size_t k = 0; k <= nN-1; k++) { + oldVaporDenNode[k]=NDS[k].rhov; + NDS[k].rhov=xx(k); + double error = std::abs(NDS[k].rhov-oldVaporDenNode[k]); + if(NDS[k].rhov<0) { + std::ostringstream err_msg; + err_msg << "Error, rhov is below zero (" << NDS[k].rhov << "). 
Can not proceed."; + throw mio::IOException(err_msg.str(), AT); + } + error_max = std::max(error_max, error); + saturationDensity = Atmosphere::waterVaporDensity(NDS[k].T, Atmosphere::vaporSaturationPressure(NDS[k].T)); + } + + break; + } while (error_max > 1.e-6); + + for (size_t e = 0; e < nE; e++) { + EMS[e].rhov = (NDS[e].rhov + NDS[e+1].rhov) / 2.0; + } + + return true; +} + +/** + * @brief Calculate the derivative of vapor saturation pressure of a flat ice surface with respect to temperature.\n + * Refer to MeteoIO::Atmosphere::vaporSaturationPressure() for the non-derived method + * in the MeteoIO library. + * @param Tem Temperature in Kelvin + * @return Derivative of vapor saturation pressure with respect to temperature + */ +double VapourTransport::dRhov_dT(const double Tem) +{ + /* Use the constants of a flat ice surface. + * See Murray, F. W., "On the computation of saturation vapor pressure", 1966, J. Appl. Meteor., 6, 203–204, + * doi: 10.1175/1520-0450(1967)006<0203:OTCOSV>2.0.CO;2. + */ + const double sat_vapor_pressure_ice_a = 21.8745584; + const double sat_vapor_pressure_ice_b = 7.66; + + const double dRhov_dT = (mio::Cst::water_molecular_mass * mio::Cst::p_water_triple_pt / mio::Cst::gaz_constant / Tem) + * exp(sat_vapor_pressure_ice_a * (Tem - mio::Cst::t_water_triple_pt) / (Tem - sat_vapor_pressure_ice_b)) + * (-1. 
/ Tem + (sat_vapor_pressure_ice_a * (Tem - sat_vapor_pressure_ice_b) - sat_vapor_pressure_ice_a * (Tem - mio::Cst::t_water_triple_pt)) / (Tem - sat_vapor_pressure_ice_b) / (Tem - sat_vapor_pressure_ice_b)); + + return dRhov_dT; +} +/* + * End of VapourTransport.cc + */ diff --git a/third_party/snowpack/snowpackCore/VapourTransport.h b/third_party/snowpack/snowpackCore/VapourTransport.h new file mode 100644 index 00000000..70637175 --- /dev/null +++ b/third_party/snowpack/snowpackCore/VapourTransport.h @@ -0,0 +1,84 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND +*/ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . 
+*/ +/** + * @file VapourTransport.h + */ + +#ifndef VAPOURTRANSPORT_H +#define VAPOURTRANSPORT_H + +#include "../Constants.h" +#include "../DataClasses.h" +#include "../Laws_sn.h" +#include "ReSolver1d.h" +#include "WaterTransport.h" +#include "Snowpack.h" +#include "PhaseChange.h" +#include "../Meteo.h" +#include "../Utils.h" +#include "Solver.h" +#include "../Constants.h" +#include "../Laws_sn.h" +#include "../SnowDrift.h" +#include "Metamorphism.h" + +#include + + +/** + * @class VapourTransport + * @version 1.0 + * @brief This module contains water vapour transport routines for the 1d snowpack model + */ +class VapourTransport : public WaterTransport { + public: + VapourTransport(const SnowpackConfig& cfg); + void compTransportMass(const CurrentMeteo& Mdata, double& ql, SnowStation& Xdata, SurfaceFluxes& Sdata); + + private: + bool compDensityProfile(const CurrentMeteo& Mdata, SnowStation& Xdata, + std::vector& hm_, std::vector& as_, + const std::vector& D_el, std::vector& oldVaporDenNode); + void compSurfaceSublimation(const CurrentMeteo& Mdata, double& ql, SnowStation& Xdata, SurfaceFluxes& Sdata); + void LayerToLayer(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata, double& ql); + double dRhov_dT(const double Tem); + + ReSolver1d RichardsEquationSolver1d; + + std::string variant; + + watertransportmodels iwatertransportmodel_snow, iwatertransportmodel_soil; + + std::string watertransportmodel_snow; + std::string watertransportmodel_soil; + double sn_dt, timeStep, waterVaporTransport_timeStep; + const static double VapourTransport_timeStep; + double hoar_thresh_rh, hoar_thresh_vw, hoar_thresh_ta; + bool useSoilLayers, water_layer; + + bool enable_vapour_transport; + double diffusionScalingFactor_, height_of_meteo_values; + bool adjust_height_of_meteo_values; + + const static double f; + + bool waterVaporTransport_timeStepAdjust; +}; +#endif // End of VapourTransport.h} diff --git a/third_party/snowpack/snowpackCore/WaterTransport.cc 
b/third_party/snowpack/snowpackCore/WaterTransport.cc index 9769052c..b1773d61 100644 --- a/third_party/snowpack/snowpackCore/WaterTransport.cc +++ b/third_party/snowpack/snowpackCore/WaterTransport.cc @@ -19,24 +19,60 @@ */ #include "WaterTransport.h" +#include "Snowpack.h" +#include "ReSolver1d.h" +#include "PhaseChange.h" #include "../Constants.h" #include "../Utils.h" -#include "PhaseChange.h" -#include "ReSolver1d.h" -#include "Snowpack.h" #include using namespace std; using namespace mio; +/** + * @page water_transport Water Transport + * It is important to realize that snow is really a three-phase medium: it might contain at the same time water in its solid phase (the ice crystals matrix), + * in its liquid phase (interstitial water) and in its gaseous phase (water vapor in the pore space). As the liquid water can move through the + * ice matrix, it transports mass as well as potentially energy. Depending on the conditions, it might also significantly alter the microstructure of + * the snow pack. Therefore it is very important to be able to simulate how this liquid water moves through the snow layers. + * + * @section matrix_vs_pref Matrix flow and preferential flow + * Liquid water can move through the snow pack in two distinct ways: through matrix flow or through preferential flow. They are fundamentally different and + * have very different time scales. + * + * @subsection matrix_flow Matrix flow + * Matrix flow represents how the liquid water moves through the pore space of the ice matrix. This capillary motion is dominated by surface tension effects. Such a flow + * is highly dependent on the tortuosity of the ice matrix, the water column pressure head and changes of such properties which can lead to capillary barriers. This kind + * of flow moves the bulk of the mass in a gradual process. + * + * @subsection preferential_flow Preferential flow + * A second kind of liquid water transport mechanism is through preferential flow.
This is a 2-dimensional effect where at some places the liquid water is able to locally + * flow much deeper into the snow pack. Although highly relevant for its impact on the snow microstructure and for its impact on snow stability, this transport mechanism + * only carries a minority of the liquid water mass. But by providing liquid water in deeper layers much faster, it can contribute to triggering a weak layer or accumulate + * liquid water at a capillary barrier (ponding) that could later refreeze and build an ice layer. + * + * @subsection wt_modeling Modeling + * In %Snowpack, water transport can currently either be modeled with the bucket approach or by solving the Richards equations. + * + * In the bucket approach, each snow layer has a given water storage capacity that can be filled by liquid water (thus similar to a bucket) and then overflows + * down to the next layer when full. This is computationally efficient but not a very accurate representation of the physical phenomena involved in the liquid water transport. + * + * On the other hand, the Richards equation describes the flow of a liquid in a porous medium and is therefore a much more adequate representation. The novelty of the Richards + * equation solver in %Snowpack is to use such equations in a medium where the matrix is just a different phase of the liquid. This is computationally much more challenging + * than the bucket approach and needs to be much more carefully configured.
+ * + * @section snowpack_wt_keys Configuration keys + * + */ + WaterTransport::WaterTransport(const SnowpackConfig& cfg) - : RichardsEquationSolver1d(cfg), variant(), - iwatertransportmodel_snow(BUCKET), iwatertransportmodel_soil(BUCKET), watertransportmodel_snow("BUCKET"), watertransportmodel_soil("BUCKET"), + : RichardsEquationSolver1d_matrix(cfg, true), RichardsEquationSolver1d_pref(cfg, false), variant(), + iwatertransportmodel_snow(BUCKET), iwatertransportmodel_soil(BUCKET), watertransportmodel_snow("BUCKET"), watertransportmodel_soil("BUCKET"), enable_pref_flow(false), pref_flow_rain_input_domain("MATRIX"), sn_dt(IOUtils::nodata), hoar_thresh_rh(IOUtils::nodata), hoar_thresh_vw(IOUtils::nodata), hoar_thresh_ta(IOUtils::nodata), hoar_density_buried(IOUtils::nodata), hoar_density_surf(IOUtils::nodata), hoar_min_size_buried(IOUtils::nodata), - minimum_l_element(IOUtils::nodata), useSoilLayers(false), water_layer(false), jam(false) + minimum_l_element(IOUtils::nodata), comb_thresh_l(IOUtils::nodata), useSoilLayers(false), water_layer(false), jam(false), enable_vapour_transport(false) { cfg.getValue("VARIANT", "SnowpackAdvanced", variant); @@ -61,7 +97,7 @@ WaterTransport::WaterTransport(const SnowpackConfig& cfg) * - r242: HOAR_THRESH_VW set to 3.5 */ cfg.getValue("HOAR_THRESH_VW", "SnowpackAdvanced", hoar_thresh_vw); - + /** * @brief No surface hoar will form at air temperatures above threshold (m s-1) * - Originaly, using THRESH_RAIN @@ -88,9 +124,15 @@ WaterTransport::WaterTransport(const SnowpackConfig& cfg) //Minimum element length (m) cfg.getValue("MINIMUM_L_ELEMENT", "SnowpackAdvanced", minimum_l_element); + double dummy_height_new_elem; //only temporarily needed + cfg.getValue("HEIGHT_NEW_ELEM", "SnowpackAdvanced", dummy_height_new_elem); + cfg.getValue("COMB_THRESH_L", "SnowpackAdvanced", comb_thresh_l, IOUtils::nothrow); + if(comb_thresh_l == IOUtils::nodata) comb_thresh_l = SnowStation::comb_thresh_l_ratio * dummy_height_new_elem; // If no 
comb_thres_l specified, use the default one (i.e., a fixed ratio from height_new_elem) + //Water transport model snow cfg.getValue("WATERTRANSPORTMODEL_SNOW", "SnowpackAdvanced", watertransportmodel_snow); iwatertransportmodel_snow=UNDEFINED; + enable_pref_flow=false; if (watertransportmodel_snow=="BUCKET") { iwatertransportmodel_snow=BUCKET; } else if (watertransportmodel_snow=="NIED") { @@ -98,6 +140,20 @@ WaterTransport::WaterTransport(const SnowpackConfig& cfg) } else if (watertransportmodel_snow=="RICHARDSEQUATION") { iwatertransportmodel_snow=RICHARDSEQUATION; } + cfg.getValue("PREF_FLOW", "SnowpackAdvanced", enable_pref_flow); + if (enable_pref_flow && watertransportmodel_snow!="RICHARDSEQUATION") { + prn_msg( __FILE__, __LINE__, "err", Date(), "PREF_FLOW = TRUE requires WATERTRANSPORTMODEL_SNOW = RICHARDSEQUATION. Preferential flow is only implemented as an extension of Richards equation."); + throw; + } + if(enable_pref_flow) { + cfg.getValue("PREF_FLOW_RAIN_INPUT_DOMAIN", "SnowpackAdvanced", pref_flow_rain_input_domain); + if(pref_flow_rain_input_domain != "MATRIX" && pref_flow_rain_input_domain != "PREF_FLOW") { + prn_msg( __FILE__, __LINE__, "err", Date(), "PREF_FLOW_RAIN_INPUT_DOMAIN is expected to be MATRIX or PREF_FLOW (mind the upper case!)."); + } + } else { + // Enforce the rain water into the matrix domain, in case PREF_FLOW model is not enabled. 
+ pref_flow_rain_input_domain="MATRIX"; + } //Water transport model soil cfg.getValue("WATERTRANSPORTMODEL_SOIL", "SnowpackAdvanced", watertransportmodel_soil); @@ -109,8 +165,11 @@ WaterTransport::WaterTransport(const SnowpackConfig& cfg) } else if (watertransportmodel_soil=="RICHARDSEQUATION") { iwatertransportmodel_soil=RICHARDSEQUATION; } -} + //Enable vapour transport + cfg.getValue("ENABLE_VAPOUR_TRANSPORT", "SnowpackAdvanced", enable_vapour_transport); + +} /** @@ -249,30 +308,20 @@ void WaterTransport::KHCalcNaga(const double RG, const double Dens, double ThR, } /** - * @brief This part of the code is EXTREMELY IMPORTANT -- especially for predicting SURFACE HOAR and BURIED DEPTH HOAR layers \n - * The total latent heat flux is predicted. If positive (and above a certain cutoff level) then there - * is a good possibility that SURFACE HOAR crystal have grown. Of course, if negative - * then we are also loosing mass from the surface. These are seperate routines since - * they might want to be changed or updated in future. \n - * Just before his wedding, when Michael was implementing solute transport as initiated by - * Peter Waldner, he realized that sublimation and evaporation was not possible from blank - * soil layers. So he changed the routine on 29 of April 2002. \n - * Another very important case of WATER movement through the snowpack is the SUBLIMATION of - * VAPOR; this piece of code was taken from phase change and placed here because there is the - * good possibility that the an ELEMENT might be SUBLIMATED away. \n - * TODO Revise description! - * @param *Xdata + * @brief This function deals with the top flux for the bucket water transport scheme.\n + * Determines the fraction of the latent heat flux ql that can be used for evaporation or + * condensation. IMPORTANT: sublimation/deposition is treated by VapourTransport. 
+ * The variable ql is updated with the amount used for evaporation/condensation, such that + * VapourTransport should interpret all remaining energy as sublimation/deposition and + * additionally take care of surface hoar formation/destruction. * @param ql Latent heat flux (W m-2) + * @param *Xdata * @param *Sdata - * @param *Mdata */ -void WaterTransport::compSurfaceSublimation(const CurrentMeteo& Mdata, double ql, SnowStation& Xdata, - SurfaceFluxes& Sdata) +void WaterTransport::compTopFlux(double& ql, SnowStation& Xdata, SurfaceFluxes& Sdata) { - double dL=0., dM=0.; // Length and mass chamges - double M=0.; // Initial mass and volumetric content (water or ice) - double hoar=0.0; // Actual change in hoar mass - double cH_old; // Temporary variable to hold height of snow + double dM = 0.; // Mass changes + double M = 0.; // Initial mass and volumetric content (water or ice) const size_t nN = Xdata.getNumberOfNodes(); const size_t nE = nN-1; @@ -283,44 +332,24 @@ void WaterTransport::compSurfaceSublimation(const CurrentMeteo& Mdata, double ql /* * If there are elements and ql > 0: * update densities and volumetric contents (ELEMENT data), - * add/subtract mass to MS_SUBLIMATION and/or MS_EVAPORATION, - * potential surface hoar formation will be tested at the end of this routine (NODAL data); - */ + * add/subtract mass to MS_EVAPORATION, + * potential surface hoar formation/destruction is tested in VapourTransport. + */ if (ql > Constants::eps2) { // Add Mass - const double melting_tk = (Xdata.getNumberOfElements()>0)? 
Xdata.Edata[Xdata.getNumberOfElements()-1].melting_tk : Constants::melting_tk; - if (Tss < melting_tk) { // Add Ice - dM = ql*sn_dt/Constants::lh_sublimation; - ql=0.; - Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += dM; - hoar = dM; - - // In this case adjust properties of element, keeping snow density constant - const double L_top = EMS[nE-1].L; - const double theta_i0 = EMS[nE-1].theta[ICE]; - dL = dM/(EMS[nE-1].Rho); // length change - if (nE == Xdata.SoilNode) { - dL = 0.; - dM = std::min(dM,EMS[nE-1].theta[AIR]*(Constants::density_ice*EMS[nE-1].L)); - } - NDS[nE].z += dL + NDS[nE].u; NDS[nE].u = 0.0; - EMS[nE-1].L0 = EMS[nE-1].L = L_top + dL; - EMS[nE-1].E = EMS[nE-1].Eps = EMS[nE-1].dEps = EMS[nE-1].Eps_e = EMS[nE-1].Eps_v = EMS[nE-1].S = 0.0; - EMS[nE-1].theta[ICE] *= L_top/EMS[nE-1].L; - EMS[nE-1].theta[ICE] += dM/(Constants::density_ice*EMS[nE-1].L); - EMS[nE-1].theta[WATER] *= L_top/EMS[nE-1].L; - - for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { - EMS[nE-1].conc[ICE][ii] *= L_top*theta_i0/(EMS[nE-1].theta[ICE]*EMS[nE-1].L); - } - } else { // Add water + const double meltfreeze_tk = (Xdata.getNumberOfElements()>0)? 
Xdata.Edata[Xdata.getNumberOfElements()-1].meltfreeze_tk : Constants::meltfreeze_tk; + if (!(Tss < meltfreeze_tk)) { + // Add water if ((iwatertransportmodel_snow != RICHARDSEQUATION && nE>Xdata.SoilNode) || (iwatertransportmodel_soil != RICHARDSEQUATION && nE==Xdata.SoilNode)) { //NANDER: check if the upper element is not part of the domain solved by the Richards Equation, because if so, we should put it in the surface flux // Add Water const double theta_w0 = EMS[nE-1].theta[WATER]; dM = ql*sn_dt/Constants::lh_vaporization; - ql=0.; + ql = 0.; Sdata.mass[SurfaceFluxes::MS_EVAPORATION] += dM; if (nE == Xdata.SoilNode) { + Sdata.mass[SurfaceFluxes::MS_EVAPORATION] -= dM; dM = std::min(dM,EMS[nE-1].theta[AIR]*(Constants::density_water*EMS[nE-1].L)); + Sdata.mass[SurfaceFluxes::MS_EVAPORATION] += dM; + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += dM; } EMS[nE-1].theta[WATER] += dM/(Constants::density_water*EMS[nE-1].L); @@ -333,42 +362,50 @@ void WaterTransport::compSurfaceSublimation(const CurrentMeteo& Mdata, double ql assert(EMS[nE-1].M >= (-Constants::eps2)); //mass must be positive // Update remaining volumetric contents and density - EMS[nE-1].theta[AIR] = std::max(0., 1.0 - EMS[nE-1].theta[WATER] - EMS[nE-1].theta[ICE] - EMS[nE-1].theta[SOIL]); - EMS[nE-1].Rho = (EMS[nE-1].theta[ICE] * Constants::density_ice) - + (EMS[nE-1].theta[WATER] * Constants::density_water) - + (EMS[nE-1].theta[SOIL] * EMS[nE-1].soil[SOIL_RHO]); + EMS[nE-1].theta[AIR] = std::max(0., 1.0 - EMS[nE-1].theta[WATER] - EMS[nE-1].theta[WATER_PREF] - EMS[nE-1].theta[ICE] - EMS[nE-1].theta[SOIL]); + EMS[nE-1].updDensity(); } else if ((ql < (-Constants::eps2)) && (nE > 0)) { - // If there is water in some form and ql < 0, SUBLIMATE and/or EVAPORATE some mass off + // If there is water in some form and ql < 0, EVAPORATE some mass off std::vector M_Solutes(Xdata.number_of_solutes, 0.); // Mass of solutes from disappearing phases size_t e = nE; - while ((e > 0) && (ql < (-Constants::eps2))) { 
// While energy is available + double ql2 = ql; // Dummy of ql. We want to mimick the effect of evaporation from deeper layers, if the energy flux is so large, that complete elements disappear. + // But, since we now have separate locations for water and ice evaporation respectively sublimation, we need to calculate already here the + // sublimation of ice to decide whether any water is evaporated from the next element below. So, ql2 also keeps track of sublimation, which is not + // applied here, but later in VapourTransport. + while ((e > 0) && (ql2 < (-Constants::eps2))) { // While energy is available e--; if ((iwatertransportmodel_snow != RICHARDSEQUATION && e>=Xdata.SoilNode) || (iwatertransportmodel_soil != RICHARDSEQUATION && e0.); // If there is water ... - if (EMS[e].theta[WATER] > ((e==nE-1)?(2.*Constants::eps):0.)) { + if ((EMS[e].theta[WATER]+EMS[e].theta[WATER_PREF]) > ((e==nE-1)?(2.*Constants::eps):0.)) { //For the top layer, it is important to keep a tiny amount of liquid water, so we are able to detect whether we need the //implicit or explicit treatment of the top boundary condition when solving the heat equation. - const double theta_w0 = EMS[e].theta[WATER]-( (e==nE-1)? (2.*Constants::eps) : 0. ); + const double theta_w0 = (EMS[e].theta[WATER]+EMS[e].theta[WATER_PREF]) - ( (e==nE-1) ? (2.*Constants::eps) : 0. 
); dM = ql*sn_dt/Constants::lh_vaporization; M = theta_w0*Constants::density_water*L0; // Check that you only take the available mass of water if (-dM >= M) { dM = -M; - EMS[e].theta[WATER] += dM/(Constants::density_water*L0); + // First empty preferential flow + const double dM_pref = std::max(-EMS[e].theta[WATER_PREF], dM/(Constants::density_water*L0)); + EMS[e].theta[WATER_PREF] += dM_pref; + // Then matrix flow + EMS[e].theta[WATER] += (dM - dM_pref)/(Constants::density_water*L0); // Add solutes to Storage for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { M_Solutes[ii] += EMS[e].conc[WATER][ii]*theta_w0*L0; } } else { - EMS[e].theta[WATER] += dM/(Constants::density_water*L0); + // First empty preferential flow + const double dM_pref = std::max(-EMS[e].theta[WATER_PREF], dM/(Constants::density_water*L0)); + EMS[e].theta[WATER_PREF] += dM_pref; + // Then matrix flow + EMS[e].theta[WATER] += (dM - dM_pref)/(Constants::density_water*L0); for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { EMS[e].conc[WATER][ii] *= theta_w0/EMS[e].theta[WATER]; } @@ -376,105 +413,32 @@ void WaterTransport::compSurfaceSublimation(const CurrentMeteo& Mdata, double ql EMS[e].M += dM; assert(EMS[e].M >= (-Constants::eps2)); //mass must be positive Sdata.mass[SurfaceFluxes::MS_EVAPORATION] += dM; + if(EMS[nE-1].theta[SOIL] > 0 ) { + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += dM; + } ql -= dM*Constants::lh_vaporization/sn_dt; // Update the energy used + ql2 -= dM*Constants::lh_vaporization/sn_dt; // Update the energy used } - if (ql < (-Constants::eps2)) { + if (ql2 < (-Constants::eps2)) { // If there is no water or if there was not enough water ... 
const double theta_i0 = EMS[e].theta[ICE]; M = theta_i0*Constants::density_ice*L0; - dM = ql*sn_dt/Constants::lh_sublimation; + dM = ql2*sn_dt/Constants::lh_sublimation; if (-dM > M) { dM = -M; - // Add solutes to Storage - for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { - M_Solutes[ii] += EMS[e].conc[ICE][ii]*theta_i0*L0; - } - EMS[e].theta[ICE]=0.0; dL = 0.; - } else { - dL = dM/(EMS[e].Rho); - if (e < Xdata.SoilNode) { - dL = 0.; - } - NDS[e+1].z += dL; EMS[e].L0 = EMS[e].L = L0 + dL; - NDS[e+1].z += NDS[e+1].u; NDS[e+1].u = 0.0; - - EMS[e].E = EMS[e].Eps = EMS[e].dEps = EMS[e].Eps_e = EMS[e].Eps_v = EMS[e].S = 0.0; - EMS[e].theta[ICE] *= L0/EMS[e].L; - EMS[e].theta[ICE] += dM/(Constants::density_ice*EMS[e].L); - EMS[e].theta[WATER] *= L0/EMS[e].L; - for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { - EMS[e].conc[ICE][ii] *= L0*theta_i0/(EMS[e].theta[ICE]*EMS[e].L); - } - } - EMS[e].M += dM; - //if we remove the whole mass, we might have some small inconcistencies between mass and theta[ICE]*density*L -> negative - //but the whole element will be removed anyway when getting out of here - assert(EMS[e].M >= (-Constants::eps2)); - Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += dM; - ql -= dM*Constants::lh_sublimation/sn_dt; // Update the energy used - - // If present at surface, surface hoar is sublimated away - if (e == nE-1) { - hoar = dM; } + ql2 -= dM*Constants::lh_sublimation/sn_dt; //Anticipated update of the energy that will be used for sublimation } // Update remaining volumetric contents and density - EMS[e].theta[AIR] = std::max(0., 1.0 - EMS[e].theta[WATER] - EMS[e].theta[ICE] - EMS[e].theta[SOIL]); - EMS[e].Rho = (EMS[e].theta[ICE] * Constants::density_ice) + (EMS[e].theta[WATER] * Constants::density_water) + (EMS[e].theta[SOIL] * EMS[e].soil[SOIL_RHO]); - } else if (e==nE-1) { - //In case we use RE for snow or soil, check if we can sublimate hoar away: - dM = ql*sn_dt/Constants::lh_sublimation; - if (-dM > NDS[nN-1].hoar ) 
dM=-NDS[nN-1].hoar; //Limit, so that only the hoar will sublimate - - if (dM < 0. ) { //If we have actual hoar to sublimate, do it: - const double L0 = EMS[e].L; - const double theta_i0 = EMS[e].theta[ICE]; - M = theta_i0*Constants::density_ice*L0; - if (-dM > M) { - dM = -M; - // Add solutes to Storage - for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { - M_Solutes[ii] += EMS[e].conc[ICE][ii]*theta_i0*L0; - } - EMS[e].theta[ICE]=0.0; dL = 0.; - } else { - dL = dM/(EMS[e].Rho); - if (e < Xdata.SoilNode) { - dL = 0.; - } - NDS[e+1].z += dL; EMS[e].L0 = EMS[e].L = L0 + dL; - NDS[e+1].z += NDS[e+1].u; NDS[e+1].u = 0.0; - - EMS[e].E = EMS[e].Eps = EMS[e].dEps = EMS[e].Eps_e = EMS[e].Eps_v = EMS[e].S = 0.0; - EMS[e].theta[ICE] *= L0/EMS[e].L; - EMS[e].theta[ICE] += dM/(Constants::density_ice*EMS[e].L); - EMS[e].theta[WATER] *= L0/EMS[e].L; - for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { - EMS[e].conc[ICE][ii] *= L0*theta_i0/(EMS[e].theta[ICE]*EMS[e].L); - } - } - EMS[e].M += dM; - //if we remove the whole mass, we might have some small inconcistencies between mass and theta[ICE]*density*L -> negative - //but the whole element will be removed anyway when getting out of here - assert(EMS[e].M>=(-Constants::eps2)); - Sdata.mass[SurfaceFluxes::MS_SUBLIMATION] += dM; - ql -= dM*Constants::lh_sublimation/sn_dt; // Update the energy used - - // If present at surface, surface hoar is sublimated away - if (e == nE-1) { - hoar = dM; - } - - // Update remaining volumetric contents and density - EMS[nE-1].theta[AIR] = std::max(0., 1.0 - EMS[nE-1].theta[WATER] - EMS[nE-1].theta[ICE] - EMS[nE-1].theta[SOIL]); - EMS[nE-1].Rho = (EMS[nE-1].theta[ICE] * Constants::density_ice) + (EMS[nE-1].theta[WATER] * Constants::density_water) + (EMS[nE-1].theta[SOIL] * EMS[nE-1].soil[SOIL_RHO]); - } + EMS[e].theta[AIR] = std::max(0., 1.0 - EMS[e].theta[WATER] - EMS[e].theta[WATER_PREF] - EMS[e].theta[ICE] - EMS[e].theta[SOIL]); + EMS[e].updDensity(); } - + //check that thetas and 
densities are consistent assert(EMS[e].theta[SOIL] >= (-Constants::eps2) && EMS[e].theta[SOIL] <= (1.+Constants::eps2)); assert(EMS[e].theta[ICE] >= (-Constants::eps2) && EMS[e].theta[ICE]<=(1.+Constants::eps2)); assert(EMS[e].theta[WATER] >= (-Constants::eps2) && EMS[e].theta[WATER]<=(1.+Constants::eps2)); + assert(EMS[e].theta[WATER_PREF] >= (-Constants::eps2) && EMS[e].theta[WATER_PREF]<=(1.+Constants::eps2)); assert(EMS[e].theta[AIR] >= (-Constants::eps2) && EMS[e].theta[AIR]<=(1.+Constants::eps2)); assert(EMS[e].Rho >= (-Constants::eps2) || EMS[e].Rho==IOUtils::nodata); //we want positive density } @@ -500,34 +464,6 @@ void WaterTransport::compSurfaceSublimation(const CurrentMeteo& Mdata, double ql } } } - - //Any left over energy (ql) should go to soil (surfacefluxrate). Units: ql = [W/m^2]=[J/s/m^2], surfacefluxrate=[m^3/m^2/s] - if(fabs(ql)>Constants::eps2) { // TODO Check that this takes correctly care of energy balance - RichardsEquationSolver1d.surfacefluxrate+=(ql/Constants::lh_vaporization)/Constants::density_water; - Sdata.mass[SurfaceFluxes::MS_EVAPORATION] += ql*sn_dt/Constants::lh_vaporization; - } - - // Check for surface hoar destruction or formation (once upon a time ml_sn_SurfaceHoar) - if ((Mdata.rh > hoar_thresh_rh) || (Mdata.vw > hoar_thresh_vw) || (Mdata.ta >= IOUtils::C_TO_K(hoar_thresh_ta))) { - //if rh is very close to 1, vw too high or ta too high, surface hoar is destroyed - hoar = std::min(hoar, 0.); - } - - Sdata.hoar += hoar; - NDS[nN-1].hoar += hoar; - if (NDS[nN-1].hoar < 0.) 
{ - NDS[nN-1].hoar = 0.; - } - for (size_t e = 0; e=Xdata.SoilNode) || (iwatertransportmodel_soil==RICHARDSEQUATION && e theta_r) { - NDS[e+1].hoar = 0.; - } - } - // At the end also update the overall height - cH_old = Xdata.cH; - Xdata.cH = NDS[Xdata.getNumberOfNodes()-1].z + NDS[Xdata.getNumberOfNodes()-1].u; - if (Xdata.mH!=Constants::undefined) Xdata.mH -= (cH_old - Xdata.cH); } /** @@ -547,9 +483,7 @@ void WaterTransport::compSurfaceSublimation(const CurrentMeteo& Mdata, double ql void WaterTransport::mergingElements(SnowStation& Xdata, SurfaceFluxes& Sdata) { const size_t nN = Xdata.getNumberOfNodes(), nE = nN-1; - // Commented to remove set but not used compiler warning - // size_t rnN = nN, - size_t rnE = nN-1; + size_t rnN = nN, rnE = nN-1; vector& EMS = Xdata.Edata; if ((nN == Xdata.SoilNode+1) @@ -559,11 +493,13 @@ void WaterTransport::mergingElements(SnowStation& Xdata, SurfaceFluxes& Sdata) return; } + bool verify_top_element = false; + double removedMass = 0.; size_t eUpper = nE; // Index of the upper element, the properties of which will be transferred to the lower adjacent one while (eUpper-- > Xdata.SoilNode) { bool enforce_merge = true; // To enforce merging in special cases if ((EMS[eUpper].L < minimum_l_element) || (EMS[eUpper].mk%100 == 3)) { - if ((EMS[eUpper].mk >= 100) && (EMS[eUpper].L >= 0.5 * minimum_l_element)) { + if ((EMS[eUpper].mk >= 100 && int(EMS[eUpper].mk/1000)!=9) && (EMS[eUpper].L >= 0.5 * minimum_l_element)) { enforce_merge = false; } if (EMS[eUpper].mk%100 == 3) { @@ -579,80 +515,97 @@ void WaterTransport::mergingElements(SnowStation& Xdata, SurfaceFluxes& Sdata) enforce_merge = false; } const double theta_r=((iwatertransportmodel_snow==RICHARDSEQUATION && eUpper>=Xdata.SoilNode) || (iwatertransportmodel_soil==RICHARDSEQUATION && eUpper 0 && eUpper == nE-1 && EMS[eUpper].theta[ICE] > 0.2 * Snowpack::min_ice_content && EMS[eUpper].L > 0.2 * minimum_l_element && EMS[eUpper-1].theta[SOIL] < Constants::eps && 
EMS[eUpper].theta[ICE] > Constants::eps && EMS[eUpper].theta[WATER] < theta_r + Constants::eps && EMS[eUpper-1].theta[WATER] > theta_r + Constants::eps); // Don't merge a dry surface snow layer with a wet one below, as the surface node may then experience a sudden increase in temperature, destroying energy balance. - + if (do_merge && is_snow_layer && !wet_layer_exception) { bool UpperJoin=false; // Default is joining with elements below - bool merged = true; // true: element is finally merged, false: element is finally removed. + bool merged = true; // true: element is finally merged, false: element is finally removed. if (eUpper > Xdata.SoilNode) { // If we have snow elements below to merge with - // We always merge snow elements, except if it is the top element, which is removed when the ice contents is below the threshold. + // We always merge snow elements + merged=true; if ( (eUpper == rnE-1) && (EMS[eUpper].theta[ICE] < Snowpack::min_ice_content) ) { - merged=false; + // In this case, we would prefer to keep the eUpper-1 element density constant, which is done in SnowStation::mergeElements(...) 
// In case we solve snow with Richards equation AND we remove the top element, we apply the water in the top layer as a Neumann boundary flux in the RE - if (iwatertransportmodel_snow == RICHARDSEQUATION) { - RichardsEquationSolver1d.surfacefluxrate+=(EMS[eUpper].theta[WATER]*EMS[eUpper].L)/(sn_dt); + if (iwatertransportmodel_snow == RICHARDSEQUATION && variant != "SEAICE") { + RichardsEquationSolver1d_matrix.surfacefluxrate+=((EMS[eUpper].theta[WATER]+EMS[eUpper].theta[WATER_PREF])*EMS[eUpper].L)/(sn_dt); // We remove water from the element, which is now in surfacefluxrate + EMS[eUpper].theta[AIR]+=(EMS[eUpper].theta[WATER]+EMS[eUpper].theta[WATER_PREF]); + removedMass += (EMS[eUpper].theta[WATER] + EMS[eUpper].theta[WATER_PREF]) * Constants::density_water * EMS[eUpper].L; EMS[eUpper].theta[WATER]=0.; + EMS[eUpper].theta[WATER_PREF]=0.; // Adjust density and mass accordingly - EMS[eUpper].Rho = (EMS[eUpper].theta[ICE]*Constants::density_ice) + (EMS[eUpper].theta[WATER]*Constants::density_water) + (EMS[eUpper].theta[SOIL]*EMS[eUpper].soil[SOIL_RHO]); + EMS[eUpper].updDensity(); EMS[eUpper].M = EMS[eUpper].Rho*EMS[eUpper].L; } - } else { - merged=true; } + // We never merge snow elements with elements containing soil inside the snowpack (e.g., for snow farming) if (EMS[eUpper-1].theta[SOIL]>Constants::eps) { merged=false; } + // After dealing with all possibilities, now finally do the merge: - SnowStation::mergeElements(EMS[eUpper-1], EMS[eUpper], merged, (eUpper==rnE-1)); + SnowStation::mergeElements(EMS[eUpper-1], EMS[eUpper], merged, (eUpper==rnE-1 && variant != "SEAICE")); + + // The upper element may grow too much in length by subsequent element merging, limit this! Note that this has the desired effect of averaging the two top elements. + if(eUpper==rnE-1 && merged==true) { + verify_top_element=true; + } } else { // We are dealing with first snow element above soil - if (rnE-1 > Xdata.SoilNode && EMS[eUpper+1].L > 0.) 
{ // If at least one snow layer above AND this layer above is not marked to be removed yet. + if (rnE-1 > Xdata.SoilNode && EMS[eUpper+1].L > 0. && EMS[eUpper+1].Rho > 0.) { // If at least one snow layer above AND this layer above is not marked to be removed yet. // In case it is the lowest snow element and there are snow elements above, join with the element above: merged=true; - SnowStation::mergeElements(EMS[eUpper], EMS[eUpper+1], true, (eUpper==nE-1)); + SnowStation::mergeElements(EMS[eUpper], EMS[eUpper+1], true, (eUpper==nE-1 && variant != "SEAICE")); UpperJoin=true; } else { // Else we remove element merged=false; - if(Xdata.SoilNode>0) { // Case of soil present - // In case of soil and removal of first snow element above soil: - // First, make sure there is no ice anymore, as we do not want to transfer ice over soil-snow interface: - EMS[eUpper].theta[WATER]+=EMS[eUpper].theta[ICE]*(Constants::density_ice/Constants::density_water); - // Take care of energy used for melting the ice: - const double ql = (EMS[eUpper].theta[ICE] * EMS[eUpper].L * Constants::density_ice * Constants::lh_fusion ); // J/m^2 - //ql is energy crossing the soil-snow interface and should be considered part of the soil-snow heat flux: - Sdata.qg0 += ql/sn_dt; - //Adjust upper soil element for the energy extracted to melt the ice: + + // First, make sure there is no ice anymore, as we do not want to transfer ice over soil-snow interface: + EMS[eUpper].theta[WATER] += EMS[eUpper].theta[ICE] * (Constants::density_ice/Constants::density_water); + EMS[eUpper].theta[ICE] = 0.; + + // Take care of energy used for melting the ice: + const double ql = (EMS[eUpper].theta[ICE] * EMS[eUpper].L * Constants::density_ice * Constants::lh_fusion); // J/m^2 + + // ql is energy crossing the soil-snow interface and should be considered part of the soil-snow heat flux: + Sdata.qg0 += ql/sn_dt; + + if (Xdata.SoilNode > 0) { // Case of soil present + // Adjust upper soil element for the energy extracted to 
melt the ice: EMS[eUpper-1].Te -= ql / (EMS[eUpper-1].c[TEMPERATURE] * EMS[eUpper-1].Rho * EMS[eUpper-1].L); - // Set amount of ice to 0. - EMS[eUpper].theta[ICE]=0.; - if(iwatertransportmodel_soil != RICHARDSEQUATION) { //Only move water into soil when we don't run richardssolver for soil - // Now do actual merging of the elements: - SnowStation::mergeElements(EMS[eUpper-1], EMS[eUpper], merged, (eUpper==rnE-1)); - } else { - // In this case, we don't need to call SnowStation::mergeElements(), as we put the mass in surfacefluxrate or soilsurfacesourceflux and just remove the element. - if(iwatertransportmodel_snow != RICHARDSEQUATION || (rnE-1)==Xdata.SoilNode) { //If we use BUCKET for snow OR we remove the last snow element, we can consider it to be the surface flux - RichardsEquationSolver1d.surfacefluxrate+=(EMS[eUpper].M/Constants::density_water)/(sn_dt); //surfacefluxrate=[m^3/m^2/s]. - } else { //We are in the middle of the domain solved by the Richards Equation, so it becomes a source: - RichardsEquationSolver1d.soilsurfacesourceflux+=(EMS[eUpper].M/Constants::density_water)/(sn_dt);//soilsurfacesourceflux=[m^3/m^2/s]. + } + + // route mass and solute load to runoff + removedMass += EMS[eUpper].M; + if (iwatertransportmodel_snow != RICHARDSEQUATION) { + // The mass from the snow element to be removed is snowpack runoff + Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += EMS[eUpper].M; + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += EMS[eUpper].M; + if (iwatertransportmodel_soil != RICHARDSEQUATION) { + if (Xdata.SoilNode > 0) { + // Only move water into soil when we don't run richardssolver for soil ... + SnowStation::mergeElements(EMS[eUpper-1], EMS[eUpper], merged, (eUpper==rnE-1 && variant != "SEAICE")); } + } else { + // ... 
otherwise put it in surfacefluxrate + RichardsEquationSolver1d_matrix.surfacefluxrate += EMS[eUpper].M / Constants::density_water / sn_dt; } + } else { + RichardsEquationSolver1d_matrix.soilsurfacesourceflux += EMS[eUpper].M / Constants::density_water / sn_dt; } - // route mass and solute load to runoff - if (iwatertransportmodel_snow != RICHARDSEQUATION || (rnE-1)==Xdata.SoilNode) { //When snow water transport is solved by Richards Equation, we calculate this there. - //Note: the second clause is necessary, because when we remove the last snow element, there is no way for the Richards Solver to figure out that this surfacefluxrate is still coming from the snowpack. + //When snow water transport is solved by Richards Equation, we calculate snowpack runoff there. + //However: when we remove the last snow element, there is no way for the Richards Solver to figure out that this surfacefluxrate is still coming from the snowpack. In that case, we should add the water to snowpack runoff here. 
+ if ( (rnE-1) == Xdata.SoilNode && (iwatertransportmodel_soil == RICHARDSEQUATION || Xdata.SoilNode == 0)) { + // Special case for RE: if all snow elements disappear, soilsurfacesourceflux has no meaning, so it should become part of the surfacefluxrate: + RichardsEquationSolver1d_matrix.surfacefluxrate += RichardsEquationSolver1d_matrix.soilsurfacesourceflux; + RichardsEquationSolver1d_matrix.soilsurfacesourceflux = 0.; if (iwatertransportmodel_snow == RICHARDSEQUATION) { - // Special case for RE: if all snow elements disappear, soilsurfacesourceflux has no meaning, so it should become part of the surfacefluxrate: - RichardsEquationSolver1d.surfacefluxrate += RichardsEquationSolver1d.soilsurfacesourceflux; - RichardsEquationSolver1d.soilsurfacesourceflux = 0.; - // Now make sure surfacefluxrate is considered snowpack runoff: - Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += RichardsEquationSolver1d.surfacefluxrate*Constants::density_water*sn_dt; - } else { - // Bucket and NIED case: - Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += EMS[eUpper].M; + // Now make sure any left-over surfacefluxrate is considered snowpack runoff: + Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += RichardsEquationSolver1d_matrix.surfacefluxrate*Constants::density_water*sn_dt; + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += RichardsEquationSolver1d_matrix.surfacefluxrate*Constants::density_water*sn_dt; } } @@ -666,7 +619,7 @@ void WaterTransport::mergingElements(SnowStation& Xdata, SurfaceFluxes& Sdata) } } rnE--; - // rnN--; + rnN--; if(UpperJoin==false) { EMS[eUpper].Rho = Constants::undefined; if (!merged) { @@ -674,14 +627,24 @@ void WaterTransport::mergingElements(SnowStation& Xdata, SurfaceFluxes& Sdata) } if ((eUpper < nE-1) && (EMS[eUpper+1].Rho < 0.) && (EMS[eUpper+1].L > 0.)) { // When upper+1 element is not marked to be removed, but we merge the upper element, we should remove the upper+1 element. 
+ // Note that this starts to compound elements (see remark in SnowStation::reduceNumberOfElements(const size_t& rnE)): EMS[eUpper+1].L *= -1.; } } else { - EMS[eUpper+1].Rho = Constants::undefined; - if (!merged) - EMS[eUpper+1].L *= -1.; // Mark element as "removed". - if ((eUpper+1 < nE-1) && (EMS[eUpper+2].Rho < 0.) && (EMS[eUpper+2].L > 0.)) { - EMS[eUpper+2].L *= -1.; + if (EMS[eUpper+1].Rho == Constants::undefined) { + // The upper join has the risk that an element (eUpper+1) could become marked Rho == Constants::undefined twice, + // in which case we reduced rnE and rnN one too much. + rnE++; + rnN++; + } else { + EMS[eUpper+1].Rho = Constants::undefined; + if (!merged && EMS[eUpper+1].L > 0.) { + EMS[eUpper+1].L *= -1.; // Mark element as "removed". + } + if ((eUpper+1 < nE-1) && (EMS[eUpper+2].Rho < 0.) && (EMS[eUpper+2].L > 0.)) { + // Note that this likely starts to compound elements (see remark in SnowStation::reduceNumberOfElements(const size_t& rnE)): + EMS[eUpper+2].L *= -1.; + } } } } @@ -689,11 +652,26 @@ void WaterTransport::mergingElements(SnowStation& Xdata, SurfaceFluxes& Sdata) if (rnE < nE) { Xdata.reduceNumberOfElements(rnE); if (!useSoilLayers && (rnE == Xdata.SoilNode)) { - Xdata.Ndata[Xdata.SoilNode].T = std::min(Constants::melting_tk, Xdata.Ndata[Xdata.SoilNode].T); + Xdata.Ndata[Xdata.SoilNode].T = std::min(Constants::meltfreeze_tk, Xdata.Ndata[Xdata.SoilNode].T); + } + if (verify_top_element && rnE > 0 && rnE > Xdata.SoilNode) { + // Note: we have to check for the SoilNode, because verify_top_element may have been set to true, but multiple element removals may have + // set rnE to the upper soil element, in case we should inhibit element splitting. + if (EMS[Xdata.getNumberOfElements()-1].L > 2.*comb_thresh_l) { + Xdata.splitElement(Xdata.getNumberOfElements()-1); + rnE++; + } + } + } + + if (removedMass > 0. 
&& variant == "SEAICE" && iwatertransportmodel_snow == RICHARDSEQUATION) { + const double delta_h = (removedMass / (Constants::density_water + SeaIce::betaS * SeaIce::OceanSalinity)); + for (size_t e=Xdata.SoilNode; e=Xdata.SoilNode) { + if (rnE >= Xdata.SoilNode) { Xdata.ColdContent = 0.; for (size_t e=Xdata.SoilNode; e Constants::min_rho && EMS[e].Rho <= Constants::max_rho)) { - prn_msg(__FILE__, __LINE__, "err", Date(), "Volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf air:%le", - e, nE, EMS[e].Rho, EMS[e].theta[ICE], EMS[e].theta[WATER], EMS[e].theta[AIR]); + EMS[e].theta[AIR] = 1.0 - EMS[e].theta[WATER] - EMS[e].theta[WATER_PREF] - EMS[e].theta[ICE]; + EMS[e].updDensity(); + if (!(EMS[e].Rho > Constants::eps && EMS[e].theta[AIR] >= 0.)) { + prn_msg(__FILE__, __LINE__, "err", Date(), "Volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf wat_pref:%lf air:%le", + e, nE, EMS[e].Rho, EMS[e].theta[ICE], EMS[e].theta[WATER], EMS[e].theta[WATER_PREF], EMS[e].theta[AIR]); throw IOException("Cannot evaluate mass balance in adjust density routine", AT); } } const double cH_old = Xdata.cH; Xdata.cH = NDS[Xdata.getNumberOfNodes()-1].z + NDS[Xdata.getNumberOfNodes()-1].u; - Xdata.mH -= (cH_old - Xdata.cH); + if (Xdata.mH!=Constants::undefined) Xdata.mH -= (cH_old - Xdata.cH); } /** @@ -771,7 +751,7 @@ void WaterTransport::adjustDensity(SnowStation& Xdata) * @param *Sdata * @param *Mdata */ -void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata) +void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata, double& ql) { size_t nN = Xdata.getNumberOfNodes(); size_t nE = nN-1; @@ -779,15 +759,14 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat vector& EMS = Xdata.Edata; //NIED (H. 
Hirashima) //Fz HACK Below follow some NIED specific declarations; please describe - std::vector Such(nE, 0.); //Suction pressure head - std::vector HydK(nE, 0.); //Hydraulic Conductivity - double ThR,SatK; //Residual water content, saturated water content and saturated hydraulic conductivity for both layers respectively. - double FluxQ; //Flux between layers + std::vector Such(nE, 0.); //Suction pressure head + std::vector HydK(nE, 0.); //Hydraulic Conductivity + double ThR,SatK; //Residual water content, saturated water content and saturated hydraulic conductivity for both layers respectively. + double FluxQ; //Flux between layers double Rh0,Rh1,Rk0,Rk1; double q0, qlim, qlim0, qlim1; double P[15]={0.}; - unsigned int WatCalc=1; //Number of iterations in WaterTransport model "NIED". - + unsigned int WatCalc=1; //Number of iterations in WaterTransport model "NIED". // First, consider no soil with no snow on the ground if (!useSoilLayers && nN == 1) { return; @@ -795,7 +774,7 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat if (Mdata.psum > 0. && Mdata.psum_ph>0.) { //there is some rain double Store = (Mdata.psum * Mdata.psum_ph) / Constants::density_water; // Depth of liquid precipitation ready to infiltrate snow and/or soil (m) // Now find out whether you are on an impermeable surface and want to create a water layer ... - if (water_layer && (Store > 0.) + if (water_layer && iwatertransportmodel_snow != RICHARDSEQUATION && iwatertransportmodel_soil != RICHARDSEQUATION && (Store > 0.) 
&& ((useSoilLayers && (nE == Xdata.SoilNode) && (EMS[nE-1].theta[SOIL] > 0.95)) || ((nE-1 > 0) && (EMS[nE-2].theta[ICE] > 0.95)))) { nE++; @@ -814,10 +793,10 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat EMS[nE-1].Te = Mdata.ta; EMS[nE-1].L0 = EMS[nE-1].L = z_water; - EMS[nE-1].Rho = Constants::density_water; + EMS[nE-1].theta[WATER] = 1.0; + EMS[nE-1].updDensity(); EMS[nE-1].M = EMS[nE-1].L0 * EMS[nE-1].Rho; assert(EMS[nE-1].M >= (-Constants::eps2)); //mass must be positive - EMS[nE-1].theta[WATER] = 1.0; EMS[nE-1].mk = 19; //NOTE all other microstructure parameters should better be set to Constants::undefined but ... EMS[nE-1].N3 = 1.; @@ -825,8 +804,9 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat EMS[nE-1].sp = 1.; EMS[nE-1].rg = 1.0; EMS[nE-1].rb = 0.5; - Xdata.cH = Xdata.mH = NDS[nN-1].z + NDS[nN-1].u; - } else if (water_layer && (Store > 0.) + Xdata.cH = NDS[nN-1].z + NDS[nN-1].u; + if (Xdata.mH != IOUtils::nodata) Xdata.mH = Xdata.cH; + } else if (water_layer && iwatertransportmodel_snow != RICHARDSEQUATION && iwatertransportmodel_soil != RICHARDSEQUATION && (Store > 0.) && ((useSoilLayers && (nE == Xdata.SoilNode+1) && (EMS[nE-2].theta[SOIL] > 0.95)) || ((nE > 1) && (EMS[nE-2].theta[ICE] > 0.95)))) { // Put rain water in existing wet layer @@ -837,7 +817,8 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat EMS[nE-1].L0 = EMS[nE-1].L = (NDS[nN-1].z + NDS[nN-1].u) - (NDS[nN-2].z + NDS[nN-2].u); EMS[nE-1].M = EMS[nE-1].L0 * EMS[nE-1].Rho; assert(EMS[nE-1].M >= (-Constants::eps2)); //mass must be positive - Xdata.cH = Xdata.mH = NDS[nN-1].z + NDS[nN-1].u; + Xdata.cH = NDS[nN-1].z + NDS[nN-1].u; + if (Xdata.mH != IOUtils::nodata) Xdata.mH = Xdata.cH; } //Put rain water in the layers, starting from the top element. 
@@ -862,6 +843,7 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat // Update snowpack runoff with rain infiltrating into soil (equal to Store when e == Xdata.SoilNode) if (e == Xdata.SoilNode) { Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += Store * Constants::density_water; + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += Store * Constants::density_water; } // Update soil runoff with rain (equal to Store when e == 0) if (e == 0) { @@ -871,7 +853,16 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat } //This adds the left over rain input to the surfacefluxrate, to be used as BC in Richardssolver: - RichardsEquationSolver1d.surfacefluxrate+=(Store)/(sn_dt); //NANDER: Store=[m], surfacefluxrate=[m^3/m^2/s] + if (pref_flow_rain_input_domain=="MATRIX") { + // Put rain in matrix domain + RichardsEquationSolver1d_matrix.surfacefluxrate+=(Store)/(sn_dt); //NANDER: Store=[m], surfacefluxrate=[m^3/m^2/s] + } else if (pref_flow_rain_input_domain=="PREF_FLOW") { + // Put rain in preferential domain + RichardsEquationSolver1d_pref.surfacefluxrate+=(Store)/(sn_dt); //NANDER: Store=[m], surfacefluxrate=[m^3/m^2/s] + } else { + prn_msg( __FILE__, __LINE__, "err", Mdata.date, "Unknown domain to transfer rain water to (check key PREF_FLOW_RAIN_INPUT_DOMAIN)."); + throw; + } Sdata.mass[SurfaceFluxes::MS_RAIN] += Mdata.psum * Mdata.psum_ph; } } @@ -898,8 +889,11 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat for (size_t eUpper = nE-1, eLower = nE-2; eUpper >= 1; eUpper--, eLower-- ) { // Determine the additional storage capacity due to refreezing const double dth_w = EMS[eUpper].c[TEMPERATURE] * EMS[eUpper].Rho / Constants::lh_fusion / Constants::density_water - * std::max(0., EMS[eUpper].melting_tk-EMS[eUpper].Te); - if ((eUpper == nE-1) && (EMS[eLower].theta[AIR] <= 0.05) && water_layer) { + * std::max(0., EMS[eUpper].meltfreeze_tk-EMS[eUpper].Te); + if ((variant=="SEAICE" 
&& Xdata.Seaice!=NULL) && Xdata.Ndata[eUpper].z + 0.5 * Xdata.Edata[eUpper].L < Xdata.Seaice->SeaLevel) { + // for sea ice: elements below sea level may fill entire pore space + Wres = std::max(0., (1. - Xdata.Edata[eUpper].theta[ICE]) * (Constants::density_ice/Constants::density_water)); + } else if ((eUpper == nE-1) && (EMS[eLower].theta[AIR] <= 0.05) && water_layer) { // allow for a water table in the last layer above road/rock Wres = Constants::density_ice/Constants::density_water * (1. - EMS[eUpper].theta[ICE] - EMS[eUpper].theta[SOIL] - 0.05); @@ -915,11 +909,11 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat // For watertransport model NIED: // If we have too much water in the element (can happen when two quite saturated elements get joined), put the excess water in excess_water. // Note: we have to do this in case of NIED method, because else SEff will not be calculated correctly. - if(EMS[eUpper].theta[WATER] > (1.-EMS[eUpper].theta[ICE]-EMS[eUpper].theta[SOIL])*(Constants::density_ice/Constants::density_water)+Constants::eps2) { + if(EMS[eUpper].theta[WATER] > (1.-EMS[eUpper].theta[WATER_PREF]-EMS[eUpper].theta[ICE]-EMS[eUpper].theta[SOIL])*(Constants::density_ice/Constants::density_water)+Constants::eps2) { const double theta_water_orig=EMS[eUpper].theta[WATER]; - EMS[eUpper].theta[WATER]=(1.-EMS[eUpper].theta[ICE]-EMS[eUpper].theta[SOIL])*(Constants::density_ice/Constants::density_water); - EMS[eUpper].theta[AIR]=(1.-EMS[eUpper].theta[ICE]-EMS[eUpper].theta[SOIL]-EMS[eUpper].theta[WATER]); - EMS[eUpper].Rho = (EMS[eUpper].theta[ICE] * Constants::density_ice) + (EMS[eUpper].theta[WATER] * Constants::density_water) + (EMS[eUpper].theta[SOIL] * EMS[eUpper].soil[SOIL_RHO]); + EMS[eUpper].theta[WATER]=(1.-EMS[eUpper].theta[WATER_PREF]-EMS[eUpper].theta[ICE]-EMS[eUpper].theta[SOIL])*(Constants::density_ice/Constants::density_water); + 
EMS[eUpper].theta[AIR]=(1.-EMS[eUpper].theta[ICE]-EMS[eUpper].theta[SOIL]-EMS[eUpper].theta[WATER]-EMS[eUpper].theta[WATER_PREF]); + EMS[eUpper].updDensity(); EMS[eUpper].M = EMS[eUpper].Rho*EMS[eUpper].L; // Put excess water in excess_water @@ -936,18 +930,13 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat // In that case you need to update the volumetric air content and the density of the top element // as it may have caught some rain! Only top element should be considered, as when rain would have // infiltrated lower elements as well, W_upper>Wres. - EMS[eUpper].theta[AIR] = std::max(0., 1. - EMS[eUpper].theta[WATER] - EMS[eUpper].theta[ICE] - EMS[eUpper].theta[SOIL]); - EMS[eUpper].Rho = (EMS[eUpper].theta[ICE] * Constants::density_ice) - + (EMS[eUpper].theta[WATER] * Constants::density_water) - + (EMS[eUpper].theta[SOIL] * EMS[eUpper].soil[SOIL_RHO]); - assert(EMS[eUpper].Rho>=0. || EMS[eUpper].Rho==IOUtils::nodata); //we want positive density - if ( EMS[eUpper].theta[SOIL] < Constants::eps2 ) { - if ( !(EMS[eUpper].Rho > Constants::min_rho && EMS[eUpper].Rho <= Constants::max_rho) ) { - prn_msg(__FILE__, __LINE__, "err", Mdata.date, - "Volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf air:%le", - eUpper, nE, EMS[eUpper].Rho, EMS[eUpper].theta[ICE], EMS[eUpper].theta[WATER], EMS[eUpper].theta[AIR]); - throw IOException("Cannot transfer water within the snowpack in transportWater()", AT); - } + EMS[eUpper].theta[AIR] = std::max(0., 1. 
- EMS[eUpper].theta[WATER] - EMS[eUpper].theta[WATER_PREF] - EMS[eUpper].theta[ICE] - EMS[eUpper].theta[SOIL]); + EMS[eUpper].updDensity(); + if (!(EMS[eUpper].Rho > Constants::eps && EMS[eUpper].theta[AIR] >= 0.)) { + prn_msg(__FILE__, __LINE__, "err", Mdata.date, + "Volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf wat_pref:%lf air:%le", + eUpper, nE, EMS[eUpper].Rho, EMS[eUpper].theta[ICE], EMS[eUpper].theta[WATER], EMS[eUpper].theta[WATER_PREF], EMS[eUpper].theta[AIR]); + throw IOException("Cannot transfer water within the snowpack in transportWater()", AT); } } @@ -1035,6 +1024,7 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat } if (EMS[eLower].theta[SOIL] < Constants::eps2) { Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += excess_water*Constants::density_water; + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += excess_water*Constants::density_water; } // Take care of Solutes for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { @@ -1061,54 +1051,50 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat // update volumetric contents, masses and density EMS[eUpper].theta[WATER]=W_upper-dThetaW_upper; EMS[eLower].theta[WATER]=W_lower+dThetaW_lower; - EMS[eUpper].theta[AIR] = 1. - EMS[eUpper].theta[WATER] - EMS[eUpper].theta[ICE] - EMS[eUpper].theta[SOIL]; - EMS[eLower].theta[AIR] = 1. - EMS[eLower].theta[WATER] - EMS[eLower].theta[ICE] - EMS[eLower].theta[SOIL]; + EMS[eUpper].theta[AIR] = 1. - EMS[eUpper].theta[WATER] - EMS[eUpper].theta[WATER_PREF] - EMS[eUpper].theta[ICE] - EMS[eUpper].theta[SOIL]; + EMS[eLower].theta[AIR] = 1. 
- EMS[eLower].theta[WATER] - EMS[eLower].theta[WATER_PREF] - EMS[eLower].theta[ICE] - EMS[eLower].theta[SOIL]; EMS[eUpper].M -= L_upper * Constants::density_water * dThetaW_upper; assert(EMS[eUpper].M >= (-Constants::eps2)); //mass must be positive EMS[eLower].M += L_lower * Constants::density_water * dThetaW_lower; assert(EMS[eLower].M >= (-Constants::eps2)); //mass must be positive - EMS[eUpper].Rho = (EMS[eUpper].theta[ICE] * Constants::density_ice) - + (EMS[eUpper].theta[WATER] * Constants::density_water) - + (EMS[eUpper].theta[SOIL] * EMS[eUpper].soil[SOIL_RHO]); + EMS[eUpper].updDensity(); assert(EMS[eUpper].Rho>=0. || EMS[eUpper].Rho==IOUtils::nodata); //we want positive density - EMS[eLower].Rho = (EMS[eLower].theta[ICE] * Constants::density_ice) - + (EMS[eLower].theta[WATER] * Constants::density_water) - + (EMS[eLower].theta[SOIL] * EMS[eLower].soil[SOIL_RHO]); + EMS[eLower].updDensity(); assert(EMS[eLower].Rho>=0. || EMS[eLower].Rho==IOUtils::nodata); //we want positive density if (EMS[eUpper].theta[SOIL] < Constants::eps2) { if (!(EMS[eUpper].theta[AIR] >= -Constants::eps)) { prn_msg(__FILE__, __LINE__, "err", Mdata.date, - "Volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf air:%le", - eUpper, nE, EMS[eUpper].Rho, EMS[eUpper].theta[ICE], EMS[eUpper].theta[WATER], EMS[eUpper].theta[AIR]); + "Volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf wat_pref:%lf air:%le", + eUpper, nE, EMS[eUpper].Rho, EMS[eUpper].theta[ICE], EMS[eUpper].theta[WATER], EMS[eUpper].theta[WATER_PREF], EMS[eUpper].theta[AIR]); throw IOException("Cannot transfer water within the snowpack in transportWater()", AT); } } // Update snowpack runoff with soil. Note: in case of no soil layers, or lowest soil element: the runoff for the lowest element is updated outside the loop. 
if (useSoilLayers && eUpper == Xdata.SoilNode) { Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += L_lower * Constants::density_water * dThetaW_lower + excess_water * Constants::density_water; + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += L_lower * Constants::density_water * dThetaW_lower + excess_water * Constants::density_water; } } // end positive water movement } else { //If eLower is soil (so water would be transported INTO soil), only remove water from snow element and don't put in soil, but in surfacefluxrate: //dThetaW_upper = std::max(0, dThetaW_upper); if(eLower==Xdata.SoilNode-1 && eUpper==Xdata.SoilNode) { Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += L_upper * Constants::density_water * dThetaW_upper + excess_water * Constants::density_water; - RichardsEquationSolver1d.surfacefluxrate+=((dThetaW_upper*L_upper)+excess_water)/(sn_dt); //surfacefluxrate is used for the Neumann BC in the Richards solver. Note: W0 is m^3/m^3 + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += L_upper * Constants::density_water * dThetaW_upper + excess_water * Constants::density_water; + RichardsEquationSolver1d_matrix.surfacefluxrate+=((dThetaW_upper*L_upper)+excess_water)/(sn_dt); //surfacefluxrate is used for the Neumann BC in the Richards solver. Note: W0 is m^3/m^3 //note: we devide by snowpack time-step (sn_dt), and not by (sn_dt/WatCalc), as we will spread the amount of runoff evenly over the snowpack time step. excess_water=0.; //Remove water from e0: // update volumetric contents, masses and density EMS[eUpper].theta[WATER]=W_upper-dThetaW_upper; - EMS[eUpper].theta[AIR] = 1. - EMS[eUpper].theta[WATER] - EMS[eUpper].theta[ICE] - EMS[eUpper].theta[SOIL]; + EMS[eUpper].theta[AIR] = 1. 
- EMS[eUpper].theta[WATER] - EMS[eUpper].theta[WATER_PREF] - EMS[eUpper].theta[ICE] - EMS[eUpper].theta[SOIL]; EMS[eUpper].M -= L_upper * Constants::density_water * dThetaW_upper; - EMS[eUpper].Rho = (EMS[eUpper].theta[ICE] * Constants::density_ice) - + (EMS[eUpper].theta[WATER] * Constants::density_water) - + (EMS[eUpper].theta[SOIL] * EMS[eUpper].soil[SOIL_RHO]); + EMS[eUpper].updDensity(); if (EMS[eUpper].theta[SOIL] < Constants::eps2) { if (!(EMS[eUpper].theta[AIR] >= -Constants::eps)) { prn_msg(__FILE__, __LINE__, "err", Mdata.date, - "Volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf air:%le", - eUpper, nE, EMS[eUpper].Rho, EMS[eUpper].theta[ICE], EMS[eUpper].theta[WATER], EMS[eUpper].theta[AIR]); + "Volume contents: e:%d nE:%d rho:%lf ice:%lf wat:%lf wat_pref:%lf air:%le", + eUpper, nE, EMS[eUpper].Rho, EMS[eUpper].theta[ICE], EMS[eUpper].theta[WATER], EMS[eUpper].theta[WATER_PREF], EMS[eUpper].theta[AIR]); throw IOException("Cannot transfer water within the snowpack in transportWater()", AT); } } @@ -1122,7 +1108,22 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat //Now solve richards equation: if((iwatertransportmodel_snow == RICHARDSEQUATION && nE>0) || (iwatertransportmodel_soil == RICHARDSEQUATION && Xdata.SoilNode > 0)) { - RichardsEquationSolver1d.SolveRichardsEquation(Xdata, Sdata); + double dummy_ql = 0.; // A dummy_ql, that may be sent to Richards equation, when evaporation / condensation doesn't need to be considered, to keep the original ql intact, so it can be treated as sublimation / deposition later + + // Only send ql if Richards equation will solve the upper element and thus should take care of evaporation / condensation: + const bool isTopLayerSolvedByREQ = (nE == Xdata.SoilNode || (nE > Xdata.SoilNode && iwatertransportmodel_snow == RICHARDSEQUATION)); + + // Only send ql if ql should first be considered as evaporation / condensation, and NOT sublimation / deposition, depending on surface temperature: + 
const double meltfreeze_tk = (Xdata.getNumberOfElements()>0)? Xdata.Edata[Xdata.getNumberOfElements()-1].meltfreeze_tk : Constants::meltfreeze_tk; + const bool isSurfaceMelting = !(NDS[nE].T < meltfreeze_tk); + + if(enable_vapour_transport) { + RichardsEquationSolver1d_matrix.SolveRichardsEquation(Xdata, Sdata, dummy_ql, Mdata.date); + } else { + RichardsEquationSolver1d_matrix.SolveRichardsEquation(Xdata, Sdata, ((isTopLayerSolvedByREQ && isSurfaceMelting) || (variant == "SEAICE" && ql < 0.)) ? (ql) : (dummy_ql), Mdata.date); + } + + if(Xdata.getNumberOfElements() > Xdata.SoilNode && enable_pref_flow) RichardsEquationSolver1d_pref.SolveRichardsEquation(Xdata, Sdata, dummy_ql, Mdata.date); // Matrix flow will take care of potential evaporation/condensation, provided by ql, so send dummy_ql for preferential flow } // The TOP element is very important because it is always losing mass--the strain state @@ -1134,42 +1135,57 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat EMS[eTop].E = EMS[eTop].Eps = EMS[eTop].dEps = EMS[eTop].Eps_e = EMS[eTop].Eps_v = EMS[eTop].S = 0.0; // RUNOFF at bottom of either snowpack or soil - if(!useSoilLayers || iwatertransportmodel_soil != RICHARDSEQUATION) { //Only if lowest element is snow or we do not use RE for soil. - // Determine the additional storage capacity due to refreezing - const double dth_w = EMS[0].c[TEMPERATURE] * EMS[0].Rho / Constants::lh_fusion / Constants::density_water - * std::max(0., EMS[0].melting_tk-EMS[0].Te); - if (EMS[0].theta[SOIL] < Constants::eps2) { - Wres = std::min((1. - EMS[0].theta[ICE]) * Constants::density_ice / Constants::density_water, - EMS[0].res_wat_cont + dth_w); - } else { // treat soil separately - Wres = std::min(Constants::density_ice/Constants::density_water*(1. 
- EMS[0].theta[ICE] - EMS[0].theta[SOIL]), - EMS[0].soilFieldCapacity() + dth_w); - } - Wres = std::max(0., Wres); - - const double W0 = EMS[0].theta[WATER]; - if ((W0 > Wres) // NOTE: if water_layer is set, do not drain water element on top of soil - && !(water_layer && (EMS[0].theta[ICE] < Snowpack::min_ice_content) - && (EMS[0].theta[SOIL] < Constants::eps2))) { - const double dM = EMS[0].L * Constants::density_water * (W0 - Wres); - EMS[0].M -= dM; - assert(EMS[0].M >= (-Constants::eps2)); //mass must be positive - EMS[0].theta[WATER] = Wres; - EMS[0].theta[AIR] = 1. - EMS[0].theta[WATER] - EMS[0].theta[ICE] - EMS[0].theta[SOIL]; - EMS[0].Rho = (EMS[0].theta[ICE] * Constants::density_ice) - + (EMS[0].theta[WATER] * Constants::density_water) - + (EMS[0].theta[SOIL] * EMS[0].soil[SOIL_RHO]); - assert(EMS[0].Rho>=0. || EMS[0].Rho==IOUtils::nodata); //we want positive density - // Note that remaining excess_water should also be routed to MS_SOIL_RUNOFF and MS_SNOWPACK_RUNOFF + if(variant != "SEAICE") { //Not for sea ice, where we assume the lowest element to be under water. + if((!useSoilLayers && iwatertransportmodel_snow != RICHARDSEQUATION) || iwatertransportmodel_soil != RICHARDSEQUATION) { //Only if lowest element is snow or we do not use RE for soil. + // Determine the additional storage capacity due to refreezing + const double dth_w = EMS[0].c[TEMPERATURE] * EMS[0].Rho / Constants::lh_fusion / Constants::density_water + * std::max(0., EMS[0].meltfreeze_tk-EMS[0].Te); if (EMS[0].theta[SOIL] < Constants::eps2) { - Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += dM + (excess_water * Constants::density_water); + Wres = std::min((1. - EMS[0].theta[ICE]) * Constants::density_ice / Constants::density_water, + EMS[0].res_wat_cont + dth_w); + } else { // treat soil separately + Wres = std::min(Constants::density_ice/Constants::density_water*(1. 
- EMS[0].theta[ICE] - EMS[0].theta[SOIL]), + EMS[0].soilFieldCapacity() + dth_w); } - Sdata.mass[SurfaceFluxes::MS_SOIL_RUNOFF] += dM + (excess_water * Constants::density_water); - for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { - Sdata.load[ii] += (EMS[0].conc[WATER][ii] * dM / S_TO_H(sn_dt)); + Wres = std::max(0., Wres); + + // Add excess water to the bottom element, such that it does not get lost + EMS[0].theta[WATER] += excess_water / EMS[0].L; + excess_water = 0.; + + const double W0 = EMS[0].theta[WATER]; + if ((W0 > Wres) // NOTE: if water_layer is set, do not drain water element on top of soil + && !(water_layer && (EMS[0].theta[ICE] < Snowpack::min_ice_content) + && (EMS[0].theta[SOIL] < Constants::eps2))) { + double dM = EMS[0].L * Constants::density_water * (W0 - Wres); + // Safety check: ensure we don't remove more mass than available + if (dM > EMS[0].M) { + SPDLOG_WARN("Removing more than available mass on bottom-most layer ! Mass will be 0 !"); + dM = EMS[0].M; // Limit to available mass + EMS[0].theta[WATER] = Wres; // Set to residual water content + } + EMS[0].M -= dM; + assert(EMS[0].M >= (-Constants::eps2)); //mass must be positive + EMS[0].theta[WATER] = Wres; + EMS[0].theta[AIR] = 1. - EMS[0].theta[WATER] - EMS[0].theta[WATER_PREF] - EMS[0].theta[ICE] - EMS[0].theta[SOIL]; + EMS[0].updDensity(); + assert(EMS[0].Rho>=0. 
|| EMS[0].Rho==IOUtils::nodata); //we want positive density + // Note that remaining excess_water should also be routed to MS_SOIL_RUNOFF and MS_SNOWPACK_RUNOFF + if (EMS[0].theta[SOIL] < Constants::eps2) { + Sdata.mass[SurfaceFluxes::MS_SNOWPACK_RUNOFF] += dM + (excess_water * Constants::density_water); + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += dM + (excess_water * Constants::density_water); + } + Sdata.mass[SurfaceFluxes::MS_SOIL_RUNOFF] += dM + (excess_water * Constants::density_water); + for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { + Sdata.load[ii] += (EMS[0].conc[WATER][ii] * dM / S_TO_H(sn_dt)); + } } } } + // If no snow, add rain in MS_SURFACE_MASS_FLUX + if(EMS[nE-1].theta[SOIL] > 0) { + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += Mdata.psum * Mdata.psum_ph; + } } /** @@ -1211,11 +1227,15 @@ void WaterTransport::transportWater(const CurrentMeteo& Mdata, SnowStation& Xdat * @param Sdata * @param Mdata */ -void WaterTransport::compTransportMass(const CurrentMeteo& Mdata, const double& ql, - SnowStation& Xdata, SurfaceFluxes& Sdata) +void WaterTransport::compTransportMass(const CurrentMeteo& Mdata, + SnowStation& Xdata, SurfaceFluxes& Sdata, double& ql) { - RichardsEquationSolver1d.surfacefluxrate=0.; //These are for the interface of snowpack with the richards solver. Initialize it to 0. - RichardsEquationSolver1d.soilsurfacesourceflux=0.; + RichardsEquationSolver1d_matrix.surfacefluxrate=0.; //These are for the interface of snowpack with the richards solver. Initialize it to 0. + RichardsEquationSolver1d_matrix.soilsurfacesourceflux=0.; + + RichardsEquationSolver1d_pref.surfacefluxrate=0.; //These are for the interface of snowpack with the richards solver. Initialize it to 0. 
+ RichardsEquationSolver1d_pref.soilsurfacesourceflux=0.; + // Do the checks for the WaterTransport model chosen: if(iwatertransportmodel_snow != BUCKET && iwatertransportmodel_snow != NIED && iwatertransportmodel_snow != RICHARDSEQUATION) { @@ -1238,17 +1258,13 @@ void WaterTransport::compTransportMass(const CurrentMeteo& Mdata, const double& throw; } - if(iwatertransportmodel_snow == RICHARDSEQUATION && !useSoilLayers) { - prn_msg( __FILE__, __LINE__, "err", Mdata.date, "The implementation of RICHARDSEQUATION for snow without soil layers is not implemented and tested! It is not clear which lower boundary condition makes sense and the snow-soil interfaceflux is only defined with soil."); - throw; - } - // First, consider no soil with no snow on the ground and deal with possible rain water if (!useSoilLayers && (Xdata.getNumberOfNodes() == Xdata.SoilNode+1)) { if (Mdata.psum > 0. && Mdata.psum_ph>0.) { //there is some rain double precip_rain = Mdata.psum * Mdata.psum_ph; Sdata.mass[SurfaceFluxes::MS_RAIN] += precip_rain; Sdata.mass[SurfaceFluxes::MS_SOIL_RUNOFF] += precip_rain; + Sdata.mass[SurfaceFluxes::MS_SURFACE_MASS_FLUX] += precip_rain; for (size_t ii = 0; ii < Xdata.number_of_solutes; ii++) { Sdata.load[ii] += Mdata.conc[ii] * precip_rain /*/ S_TO_H(sn_dt)*/; } @@ -1256,12 +1272,15 @@ void WaterTransport::compTransportMass(const CurrentMeteo& Mdata, const double& return; } - compSurfaceSublimation(Mdata, ql, Xdata, Sdata); + if (!enable_vapour_transport) { + compTopFlux(ql, Xdata, Sdata); + } mergingElements(Xdata, Sdata); try { adjustDensity(Xdata); - transportWater(Mdata, Xdata, Sdata); + if (variant=="SEAICE" && Xdata.Seaice!=NULL && iwatertransportmodel_snow == BUCKET) Xdata.Seaice->compFlooding(Xdata, Sdata); + transportWater(Mdata, Xdata, Sdata, ql); } catch(const exception&){ prn_msg( __FILE__, __LINE__, "err", Mdata.date, "Error in transportMass()"); throw; diff --git a/third_party/snowpack/snowpackCore/WaterTransport.h 
b/third_party/snowpack/snowpackCore/WaterTransport.h index 2e4431f1..f18c0ff6 100644 --- a/third_party/snowpack/snowpackCore/WaterTransport.h +++ b/third_party/snowpack/snowpackCore/WaterTransport.h @@ -40,7 +40,15 @@ class WaterTransport { public: WaterTransport(const SnowpackConfig& cfg); - void compTransportMass(const CurrentMeteo& Mdata, const double& ql, SnowStation& Xdata, SurfaceFluxes& Sdata); + virtual ~WaterTransport() {} + void compTransportMass(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata, double& ql); + + protected: + void mergingElements(SnowStation& Xdata, SurfaceFluxes& Sdata); + void adjustDensity(SnowStation& Xdata); + + //To prevent string comparisons, we define an enumerated list: + enum watertransportmodels{UNDEFINED, BUCKET, NIED, RICHARDSEQUATION}; private: //The following 3 functions are used in WaterTransport model "NIED" @@ -48,29 +56,27 @@ class WaterTransport { double Bisection(const double minval, const double maxval, double P[]); void KHCalcNaga(const double RG, const double Dens, double ThR, const double WatCnt, const double SatuK, double &Rh, double &Rk); - void compSurfaceSublimation(const CurrentMeteo& Mdata, double ql, SnowStation& Xdata, SurfaceFluxes& Sdata); - - void mergingElements(SnowStation& Xdata, SurfaceFluxes& Sdata); - - void adjustDensity(SnowStation& Xdata); - - void transportWater(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata); + void compTopFlux(double& ql, SnowStation& Xdata, SurfaceFluxes& Sdata); + void transportWater(const CurrentMeteo& Mdata, SnowStation& Xdata, SurfaceFluxes& Sdata, double& ql); - ReSolver1d RichardsEquationSolver1d; + ReSolver1d RichardsEquationSolver1d_matrix; + ReSolver1d RichardsEquationSolver1d_pref; std::string variant; - //To prevent string comparisons, we define an enumerated list: - enum watertransportmodels{UNDEFINED, BUCKET, NIED, RICHARDSEQUATION}; watertransportmodels iwatertransportmodel_snow, iwatertransportmodel_soil; std::string 
watertransportmodel_snow; std::string watertransportmodel_soil; + bool enable_pref_flow; + std::string pref_flow_rain_input_domain; + double sn_dt; double hoar_thresh_rh, hoar_thresh_vw, hoar_thresh_ta; double hoar_density_buried, hoar_density_surf, hoar_min_size_buried; - double minimum_l_element; + double minimum_l_element, comb_thresh_l; bool useSoilLayers, water_layer, jam; + + bool enable_vapour_transport; }; #endif //End of WaterTransport.h - diff --git a/third_party/snowpack/vanGenuchten.cc b/third_party/snowpack/vanGenuchten.cc new file mode 100644 index 00000000..a05fd94b --- /dev/null +++ b/third_party/snowpack/vanGenuchten.cc @@ -0,0 +1,635 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND +*/ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . +*/ + +#include "vanGenuchten.h" +#include "snowpackCore/ReSolver1d.h" +#include "Constants.h" +#include "Utils.h" + +/** + * @file vanGenuchten.cc + * @version 17.06 + * @brief This module contains the van Genuchten model for the water retention curve + */ + + +/** + * @brief Class constructor \n + * @author Nander Wever + * @param pEMS pointer to the ElementData class which owns the van Genuchten class, so the van Genuchten class can access objects from the ElementData class. 
+ */ +vanGenuchten::vanGenuchten(ElementData& pEMS) : + EMS(&pEMS), theta_r(0.), theta_s(1.), alpha(0.), n(0.), m(0.), h_e(0.), Sc(0.), ksat(0.), field_capacity(0), defined(false) {} + + +/** + * @brief Copy constructor \n + * @author Nander Wever + * @param c Class to copy + */ +vanGenuchten::vanGenuchten(const vanGenuchten& c) : + EMS(c.EMS), theta_r(c.theta_r), theta_s(c.theta_s), alpha(c.alpha), n(c.n), m(c.m), h_e(c.h_e), Sc(c.Sc), ksat(c.ksat), field_capacity(c.field_capacity), defined(c.defined) {} + + +/** + * @brief Assignment operator \n + * @author Nander Wever + * @param source Class to assign + */ +vanGenuchten& vanGenuchten::operator=(const vanGenuchten& source) { + if(this != &source) { + theta_r = source.theta_r; + theta_s = source.theta_s; + alpha = source.alpha; + n = source.n; + m = source.m; + h_e = source.h_e; + Sc = source.Sc; + ksat = source.ksat; + + defined = source.defined; + } + return *this; +} + +std::iostream& operator<<(std::iostream& os, const vanGenuchten& data) +{ + os.write(reinterpret_cast(&data.EMS), sizeof(data.EMS)); + os.write(reinterpret_cast(&data.theta_r), sizeof(data.theta_r)); + os.write(reinterpret_cast(&data.theta_s), sizeof(data.theta_s)); + os.write(reinterpret_cast(&data.alpha), sizeof(data.alpha)); + os.write(reinterpret_cast(&data.n), sizeof(data.n)); + os.write(reinterpret_cast(&data.m), sizeof(data.m)); + os.write(reinterpret_cast(&data.h_e), sizeof(data.h_e)); + os.write(reinterpret_cast(&data.Sc), sizeof(data.Sc)); + os.write(reinterpret_cast(&data.ksat), sizeof(data.ksat)); + os.write(reinterpret_cast(&data.defined), sizeof(data.defined)); + return os; +} + +std::iostream& operator>>(std::iostream& is, vanGenuchten& data) +{ + is.read(reinterpret_cast(&data.EMS), sizeof(data.EMS)); + is.read(reinterpret_cast(&data.theta_r), sizeof(data.theta_r)); + is.read(reinterpret_cast(&data.theta_s), sizeof(data.theta_s)); + is.read(reinterpret_cast(&data.alpha), sizeof(data.alpha)); + is.read(reinterpret_cast(&data.n), 
sizeof(data.n)); + is.read(reinterpret_cast(&data.m), sizeof(data.m)); + is.read(reinterpret_cast(&data.h_e), sizeof(data.h_e)); + is.read(reinterpret_cast(&data.Sc), sizeof(data.Sc)); + is.read(reinterpret_cast(&data.ksat), sizeof(data.ksat)); + is.read(reinterpret_cast(&data.defined), sizeof(data.defined)); + return is; +} + +/** + * @brief Calculate air entry pressure head \n + * Air entry pressure head in [m] that corresponds to a maximum pore size (using Young-Laplace Equation).\n + * This is a required value for specifying water retention curves, see Ippisch et al. (2006).\n + * @author Nander Wever + * @param MaximumPoreSize Maximum pore size (diameter, not radius!) [m] + * @param Temperature Temperature for determining surface tension [K] + */ +double vanGenuchten::AirEntryPressureHead(const double MaximumPoreSize, const double Temperature) +{ + //Surface tension is dependent on the temperature. Most simulations will be in the temperature range of -20 - +20 degC. + //Source: http://en.wikipedia.org/wiki/Surface_tension + //Surface tension of water in N/m. + const double SurfaceTension = (Temperature > 293.)? 0.07197 : 0.07564; //Value for 25 degC vs for 0 degC + const double delta_P=-1.*(2.*SurfaceTension)/(0.5*MaximumPoreSize); + const double air_entry_head=delta_P/(Constants::density_water*Constants::g); + + return air_entry_head; +} + + +/** + * @brief Set soil parameters for a given soil type \n + * Set soil parameters for a given soil type \n + * @author Nander Wever + * @param type Soil type + */ +void vanGenuchten::SetSoil(const SoilTypes type) +{ + double MaximumPoreSize=0.; //Maximum pore size (diameter) in [m] + + //Set van Genuchten parameters + switch (type) { + case ORGANIC: + //Organic: Nemes (2001), Development of Soil Hydraulic Pedotransfer Functions on a European scale: Their Usefulness in the Assessment of Soil Quality. 
+ theta_r=0.01; + theta_s=0.766; + alpha=1.3; + n=1.2039; + ksat=8.000/(365.*24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.2; + break; + + //ROSETTA Class Average Hydraulic Parameters: http://ars.usda.gov/Services/docs.htm?docid=8955 + //Field capacity computed from: K.E. Saxton et al., 1986, Estimating generalized soil-water characteristics from texture. Soil Sci. Soc. Amer. J. 50(4):1031-1036 + case CLAY: + theta_r=0.098; + theta_s=0.459; + n=1.253; + alpha=1.496; + ksat=0.14757/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.479; + break; + + case CLAYLOAM: + theta_r=0.079; + theta_s=0.442; + n=1.416; + alpha=1.581; + ksat=0.0818/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.336; + break; + + case LOAM: + theta_r=0.061; + theta_s=0.399; + alpha=1.11; + n=1.47; + ksat=0.02947/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.262; + break; + + case LOAMYSAND: + theta_r=0.049; + theta_s=0.39; + n=1.746; + alpha=3.475; + ksat=1.052/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.171; + break; + + case SAND: + theta_r=0.053; + theta_s=0.375; + n=3.177; + alpha=3.524; + ksat=6.427/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.132; + break; + + case SANDYCLAY: + theta_r=0.117; + theta_s=0.385; + n=1.208; + alpha=3.342; + ksat=0.1135/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.368; + break; + + case SANDYCLAYLOAM: + theta_r=0.063; + theta_s=0.384; + n=1.330; + alpha=2.109; + ksat=0.1318/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.272; + break; + + case SANDYLOAM: + theta_r=0.039; + theta_s=0.387; + n=1.4488; + alpha=2.667; + ksat=0.3828/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.205; + break; + + case SILT: + theta_r=0.050; + theta_s=0.489; + n=1.6788; + alpha=0.6577; + ksat=0.4375/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.316; + break; + + case SILTYCLAY: + theta_r=0.111; + theta_s=0.481; + n=1.321; + alpha=1.622; + 
ksat=0.09616/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.452; + break; + + case SILTYCLAYLOAM: + theta_r=0.090; + theta_s=0.482; + n=1.5205; + alpha=0.8395; + ksat=0.1112/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.367; + break; + + case SILTLOAM: + theta_r=0.065; + theta_s=0.439; + n=1.6634; + alpha=0.5058; + ksat=0.1824/(24.*60.*60.); + MaximumPoreSize=0.005; + field_capacity=0.292; + break; + + case WFJGRAVELSAND: //Gravel/sand + theta_r=0.01; + theta_s=0.35; + n=4.5; + alpha=3.5; + ksat=0.000003171; //Equal to 100 m/year, for clean sand and silty sand, according to: http://web.ead.anl.gov/resrad/datacoll/conuct.htm + MaximumPoreSize=0.005; + field_capacity=0.07; + break; + + default: + throw mio::UnknownValueException("Unknown soil type", AT); + + } + + h_e=vanGenuchten::AirEntryPressureHead(MaximumPoreSize, 273.); + m=(n-1.)/(n); + + return; +} + + +/** + * @brief Calculating pressure head from water content \n + * The following function calculates the pressure head belonging to a given water content \n + * @author Nander Wever + * @param theta Water content (m^3/m^3) + * @param h_d Dry limit of pressure head + */ +double vanGenuchten::fromTHETAtoH(const double theta, const double h_d) +{ + //Inverse of Van Genuchten (1980), Equation 21: + double returnvalue; + if (theta<=theta_r) { + returnvalue=h_d; + } else { + if (theta >= theta_s) { + returnvalue=h_e; + } else { + returnvalue=-1.*(1./alpha)*pow( (pow(Sc*((theta-theta_r)/(theta_s-theta_r)), (-1./m)) - 1.), (1./n)); + } + } + return returnvalue; +} + + +/** + * @brief Calculating pressure head from water content when ice is present \n + * The following function calculates the pressure head belonging to a given water content when ice is present \n + * @author Nander Wever + * @param theta Water content (m^3/m^3) + * @param h_d Dry limit of pressure head + * @param theta_i Ice content (m^3/m^3) + */ +double vanGenuchten::fromTHETAtoHforICE(const double theta, const double h_d, 
const double theta_i) +{ + //To have same return value as fromTHETAtoH, call this function with theta_i==0. + return fromTHETAtoH(theta+(theta_i*(Constants::density_ice/Constants::density_water)), h_d); +} + + +/** + * @brief Calculating volumetric water content from pressure head \n + * The following function calculates the volumetric water content belonging to a given pressure head \n + * @author Nander Wever + * @param h Pressure head (m) + */ +double vanGenuchten::fromHtoTHETA(const double h) +{ + double returnvalue=0.; + //Van Genuchten (1980), Equation 21: + if (h>h_e) { //Saturation + returnvalue=theta_s; + } else { + returnvalue=theta_r+( (theta_s-theta_r)*(1./Sc)*pow(1.+pow((alpha*fabs(h)),n),(-1.*m)) ); + } + return returnvalue; +} + + +/** + * @brief Calculating volumetric water content from pressure head when ice is present \n + * The following function calculates the volumetric water content belonging to a given pressure head when ice is present \n + * @author Nander Wever + * @param h Pressure head (m) + * @param theta_i Ice content (m^3/m^3) + */ +double vanGenuchten::fromHtoTHETAforICE(const double h, const double theta_i) +{ + //To have same return value as fromHtoTHETA, call this function with theta_i==0. + return fromHtoTHETA(h)-(theta_i*(Constants::density_ice/Constants::density_water)); +} + + +/** + * @brief Specific moisture capacity\n + * This function should return *exact* value of the derivative d.theta / d.h, otherwise RE-solver is unstable. 
+ * When modifying the water retention curve, for example in the dry limit, the specific moisture capacity function needs to be modified too, to remain an exact derivative.\n + * @author Nander Wever + * @param h Pressure head (m) + */ +double vanGenuchten::dtheta_dh(const double h) { + // To determine derivative in wxMaxima, do the following: + // > theta(h) = theta_r + ( (theta_s-theta_r)*(1./Sc)*(1.+((alpha*abs(h))^n))^(-m) ) + // > diff(%o1, h) + // result: -(pow((alpha*fabs(h)),n)*pow((pow((alpha*fabs(h)), n)+1.),-m-1.)*m*n*(theta_s-theta_r))/(h*Sc), rewrites to: + return alpha*n*m*((theta_s-theta_r)/Sc)*(pow((alpha*fabs(h)), (n-1.)))*(pow(1.+pow((alpha*fabs(h)), n), (-m-1.))); +} + + +/** + * @brief Initialize van Genuchten model for snow layers\n + * @author Nander Wever + * @param VGModelTypeSnow van Genuchten model parameterization to use + * @param K_PARAM hydraulic conductivity parameterization to use + * @param matrix true: set parameters for matrix domain. false: set parameters for preferential flow domain + * @param seaice if true: use some tweaks for sea ice. + */ +void vanGenuchten::SetVGParamsSnow(const VanGenuchten_ModelTypesSnow VGModelTypeSnow, const K_Parameterizations K_PARAM, const bool& matrix, const bool& seaice) +{ + if (EMS->theta[ICE] > 0.75) { + theta_r=0.; + } else { + if(matrix==true) { + //Scaling theta_r between 0 and 0.02: + const double TuningFactor=0.75; //Tuning factor for scaling + //Increase theta_r in case of wetting: + theta_r=std::max(0., std::min(0.02, std::max(theta_r, TuningFactor*EMS->theta[WATER]))); + //Decrease theta_r in case of refreezing: + theta_r=std::max(0., std::min(theta_r, EMS->theta[WATER]-(ReSolver1d::REQUIRED_ACCURACY_THETA/10.))); + } else { + //For preferential flow, we fix theta_r to 0: + theta_r=0.; + } + } + + theta_s=(1. 
- EMS->theta[ICE])*(Constants::density_ice/Constants::density_water); + + if(theta_srg; //Backup original grain size value + + switch ( VGModelTypeSnow ) { //Set Van Genuchten parameters for snow, depending on the chosen model for snow. + + case YAMAGUCHI2012: + { + //Calculate ratio density/grain size (see Yamaguchi (2012)): + double tmp_rho_d=(EMS->theta[ICE]*Constants::density_ice)/( (2.*EMS->rg) / 1000.); + //Limit tmp_rho_d to reasonable values, so alpha and especially n remain in numerically stable bounds: + if(seaice) { + tmp_rho_d=std::max(100000., tmp_rho_d); + } else { + tmp_rho_d=std::max(2000., tmp_rho_d); + } + alpha=4.4E6*pow(tmp_rho_d, -0.98); //See Eq. 6 in Yamaguchi (2012). + n=1.+2.7E-3*pow(tmp_rho_d, 0.61); //See Eq. 7 in Yamaguchi (2012). + break; + } + + case YAMAGUCHI2010: + { + //Limit grain size, to stay within the bounds of the Van Genuchten parameterizations for snow. + const double GRAINRADIUSLOWERTHRESHOLD=0.0; //Lower threshold + const double GRAINRADIUSUPPERTHRESHOLD=2.0; //Upper threshold. 2.02 is value for n>1, which is required. + //Now limit grain sizes + if(EMS->rg>GRAINRADIUSUPPERTHRESHOLD) EMS->rg=GRAINRADIUSUPPERTHRESHOLD; + if(EMS->rgrg=GRAINRADIUSLOWERTHRESHOLD; + + //Note: rg is in mm, and it is the radius (confirmed by Charles, see DataClasses.h) + alpha=7.3*(2.*EMS->rg)+1.9; //See Eq. 12 (note d is defined as diameter in mm!) in Yamaguchi (2010). + n=-3.3*(2.*EMS->rg)+14.4; //See Eq. 11 (note d is defined as diameter in mm!) in Yamaguchi (2010). + break; + } + + case YAMAGUCHI2010_ADAPTED: + { + //Limit grain size, the parameterizations still hold, but high values of alpha and small values of n are causing numerical troubles. 
+ const double GRAINRADIUSLOWERTHRESHOLD=0.0; //Lower threshold + const double GRAINRADIUSUPPERTHRESHOLD=4.0; //Upper threshold + //Now limit grain sizes + if(EMS->rg>GRAINRADIUSUPPERTHRESHOLD) EMS->rg=GRAINRADIUSUPPERTHRESHOLD; + if(EMS->rgrg=GRAINRADIUSLOWERTHRESHOLD; + + alpha=7.3*(2.*EMS->rg)+1.9; //See Eq. 12 (note d is defined as diameter in mm!) in Yamaguchi (2010). + //Instead of the linear fit in Yamaguchi (2010), Hirashima (2011) approximated the data with a power law fit, valid for the whole range of grain sizes: + n=15.68*exp(-0.46*(2.*EMS->rg)) + 1.; //Hirashima (2011), Eq. 17 + break; + } + + case DAANEN: + { + const double GRAINRADIUSLOWERTHRESHOLD=0.0; //Equal to Yamaguchi adapted + const double GRAINRADIUSUPPERTHRESHOLD=4.0; //Equal to Yamaguchi adapted + //Now limit grain sizes + if(EMS->rg>GRAINRADIUSUPPERTHRESHOLD) EMS->rg=GRAINRADIUSUPPERTHRESHOLD; + if(EMS->rgrg=GRAINRADIUSLOWERTHRESHOLD; + + alpha=30.*(2.*EMS->rg)+12.; + n=0.800*(2.*EMS->rg)+3.; + break; + } + default: + throw mio::UnknownValueException("Unknown Van Genuchten parameter for snow", AT); + + } + + + const double tmp_dynamic_viscosity_water=0.001792; // In Pa/s, from WaterTransport code by Hirashima: 0.001792 + const double tmp_phi = (1. - EMS->theta[ICE]); // Porosity + if (tmp_phi > 0.25) { // For low density + switch ( K_PARAM ) { //Set saturated hydraulic conductivity + + case CALONNE: + //See: Calonne et al., 3-D image-based numerical computations of snow permeability: links to specific surface area, density, and microstructural anisotropy, TC, 2012. + ksat=0.75 * (EMS->ogs / 1000.)*(EMS->ogs / 1000.) * exp(-0.013 * EMS->theta[ICE] * Constants::density_ice) * (Constants::g * Constants::density_water) / tmp_dynamic_viscosity_water; + break; + + case KOZENYCARMAN: + ksat=(EMS->sp * EMS->sp) * (tmp_phi * tmp_phi * tmp_phi * (EMS->ogs / 1000.) * (EMS->ogs / 1000.)) / (150. 
* ( EMS->theta[ICE] * EMS->theta[ICE] )) * (Constants::g * Constants::density_water) / tmp_dynamic_viscosity_water; + break; + + case SHIMIZU: + //This formulation for ksat is proposed by Shimizu (1970), and is valid up to 450 kg/m^3. See Equation 5 in Jordan, 1999 + conversion from hydraulic permeability to hydraulic conductivity. + if(EMS->theta[ICE] * Constants::density_ice>450.) { + ksat=0.077 * (2.*EMS->rg / 1000.)*(2.*EMS->rg / 1000.) * exp(-0.0078 * 450.) * (Constants::g * Constants::density_water) / tmp_dynamic_viscosity_water; + } else { + ksat=0.077 * (2.*EMS->rg / 1000.)*(2.*EMS->rg / 1000.) * exp(-0.0078 * EMS->theta[ICE] * Constants::density_ice) * (Constants::g * Constants::density_water) / tmp_dynamic_viscosity_water; + } + break; + + default: + throw mio::UnknownValueException("Unknown hydraulic conductivity parameter", AT); + + } + } else { //For high density + // Eq. 5 in Golden, K. M., H. Eicken, A. L. Heaton, J. Miner, D. J. Pringle, and J. Zhu (2007), Thermal evolution of permeability and microstructure in sea ice, Geophys. Res. Lett., 34, L16501, doi:10.1029/2007GL030447: + ksat = 3E-8 * pow((1. - std::min(1., EMS->theta[ICE])), 3.) * (Constants::g * Constants::density_water) / tmp_dynamic_viscosity_water; + } + + if (seaice) ksat = std::min(ksat, 1E-3); + + //Set air entry pressure + h_e=vanGenuchten::AirEntryPressureHead(0.005, 273.); + + //Calculate m: + m=(n-1.)/n; + + //Calculate saturation at cut-off point h_e (see Ippisch et al (2006)). + Sc=pow((1.+pow(alpha*fabs(h_e), n)), -1.*m); + + //Restore original grain size value from backup + EMS->rg=tmprg; + + //The VG model has been initialized + defined=true; + + return; +} + + +/** + * @brief Enforce thermal equilibrium in the soil layers\n + * @param fixTemp If true, keep element temperature fixed and repartition theta[ICE] and theta[WATER]. If false, keep theta[ICE] and theta[WATER] fixed, and calculate element temperature. The latter approach is prone to singularities. 
For example, a soil layer with a temperature below Constants::meltfreeze_tk, but with only water and no ice, cannot be properly initialized (i.e., there is no solution that satisfies thermal equilibrium). + * @return True: element state was modified by this function, false: element state was not modified by this function + * @author Nander Wever + */ +bool vanGenuchten::enforceThermalEquilibrium(const bool fixTemp) +{ + const double h_d = -1E10; + if(EMS->theta[SOIL] > 0. && EMS->Te < Constants::meltfreeze_tk) { + // If this is a soil layer with temperatures below the freezing point of water + const double hw0 = fromTHETAtoHforICE(EMS->theta[WATER], h_d, EMS->theta[ICE]); + EMS->meltfreeze_tk = Constants::meltfreeze_tk + ((Constants::g*Constants::meltfreeze_tk) / Constants::lh_fusion) * hw0; + if (fixTemp) { + // Keep temperature fixed, repartition ice and water according to prescribed temperature + if (EMS->Te >= EMS->meltfreeze_tk) { + // Above freezing point, only water + const double theta_w_new = fromHtoTHETA(hw0); + static const double theta_ice_new = 0.; + EMS->theta[ICE] = theta_ice_new; + EMS->theta[WATER] = theta_w_new; + EMS->theta[AIR] = 1. - EMS->theta[WATER] - EMS->theta[WATER_PREF] - EMS->theta[ICE] - EMS->theta[SOIL]; + } else { + // Freezing conditions + const double theta_w_new = fromHtoTHETA(hw0 + (Constants::lh_fusion / (Constants::g * EMS->meltfreeze_tk)) * (EMS->Te - EMS->meltfreeze_tk)); + const double theta_ice_new = (fromHtoTHETA(hw0) - theta_w_new) / (Constants::density_ice/Constants::density_water); + EMS->theta[ICE] = theta_ice_new; + EMS->theta[WATER] = theta_w_new; + EMS->theta[AIR] = 1. - EMS->theta[WATER] - EMS->theta[WATER_PREF] - EMS->theta[ICE] - EMS->theta[SOIL]; + } + } else { + // Keep theta[ICE] and theta[WATER] constant, and recalculate element temperature + double Te_new = Constants::meltfreeze_tk; + if (EMS->theta[ICE] > 0.) 
{ + Te_new = (fromTHETAtoH(EMS->theta[WATER], h_d) - hw0) * ((Constants::g * EMS->meltfreeze_tk) / Constants::lh_fusion) + EMS->meltfreeze_tk; + //throw mio::UnknownValueException("Not implemented yet.", AT); + } + EMS->Te = Te_new; + } + return true; + } else { + return false; + } +} + +/** + * @brief Initialize van Genuchten model for soil layers, based on index approach via grain size\n + * @author Nander Wever + */ +void vanGenuchten::SetVGParamsSoil() +{ + if(EMS->rg < 0.5) { + SetSoil(ORGANIC); + } else if (EMS->rg < 1.) { + SetSoil(CLAY); + } else if (EMS->rg < 2.) { + SetSoil(CLAYLOAM); + } else if (EMS->rg < 3.) { + SetSoil(LOAM); + } else if (EMS->rg < 4.) { + SetSoil(LOAMYSAND); + } else if (EMS->rg < 5.) { + SetSoil(SAND); + } else if (EMS->rg < 6.) { + SetSoil(SANDYCLAY); + } else if (EMS->rg < 7.) { + SetSoil(SANDYCLAYLOAM); + } else if (EMS->rg < 8.) { + SetSoil(SANDYLOAM); + } else if (EMS->rg < 9.) { + SetSoil(SILT); + } else if (EMS->rg < 10.) { + SetSoil(SILTYCLAY); + } else if (EMS->rg < 11.) { + SetSoil(SILTYCLAYLOAM); + } else if (EMS->rg < 12.) { + SetSoil(SILTLOAM); + } else { + SetSoil(WFJGRAVELSAND); + } + + //Calculate m: + m=(n-1.)/n; + + //Calculate saturation at cut-off point h_e (see Ippisch et al (2006)). + Sc=pow((1.+pow(alpha*fabs(h_e), n)), -1.*m); + + //I encountered the following problem: fully saturated soil and freezing water: there is not enough place to store the ice!!! + //In the old snowpack code, this problem was solved by keeping the increase in volume when all the water in the element would freeze, free as theta[AIR]. + //However, this will not work in the Richards, as theta[WATER] is varying per time step. 
So we keep free a volume as if the soil is saturated AND will freeze: + EMS->theta[SOIL]=1.-((Constants::density_water/Constants::density_ice)*theta_s); //Determine the soil content based on the pore space + + //The VG model has been initialized + defined=true; + + return; +} diff --git a/third_party/snowpack/vanGenuchten.h b/third_party/snowpack/vanGenuchten.h new file mode 100644 index 00000000..d9649eec --- /dev/null +++ b/third_party/snowpack/vanGenuchten.h @@ -0,0 +1,83 @@ +/* + * SNOWPACK stand-alone + * + * Copyright WSL Institute for Snow and Avalanche Research SLF, DAVOS, SWITZERLAND +*/ +/* This file is part of Snowpack. + Snowpack is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Snowpack is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Snowpack. If not, see . 
+*/ +/** + * @file vanGenuchten.h + */ +#ifndef VANGENUCHTEN_H +#define VANGENUCHTEN_H + +#include + +/** + * @class vanGenuchten + * @version 1.0 + * @brief This module contains the van Genuchten model for the water retention curve + * @author Nander Wever + */ +class ElementData; + +class vanGenuchten { + + public: + vanGenuchten(ElementData &pEMS); + vanGenuchten(const vanGenuchten& c); + virtual ~vanGenuchten() {} + vanGenuchten& operator=(const vanGenuchten&); ///>(std::iostream& is, vanGenuchten& data); + ElementData *EMS; // Reference to the ElementData where the vanGenuchten class belongs to + + //Soil types + enum SoilTypes{ORGANIC, CLAY, CLAYLOAM, LOAM, LOAMYSAND, SAND, SANDYCLAY, SANDYCLAYLOAM, SANDYLOAM, SILT, SILTYCLAY, SILTYCLAYLOAM, SILTLOAM, WFJGRAVELSAND}; + //Van genuchten model types + enum VanGenuchten_ModelTypesSnow{YAMAGUCHI2012, YAMAGUCHI2010, YAMAGUCHI2010_ADAPTED, DAANEN}; + //Hydraulic conductivity parameterizations + enum K_Parameterizations{CALONNE, KOZENYCARMAN, SHIMIZU}; + + // Functions + static double AirEntryPressureHead(double MaximumPoreSize, double Temperature); + + // Van Genuchten functions + double fromTHETAtoH(const double theta, const double h_d); + double fromTHETAtoHforICE(const double theta, const double h_d, const double theta_i); + double fromHtoTHETA(const double h); + double fromHtoTHETAforICE(const double h, const double theta_i); + double dtheta_dh(const double h); + + // Functions to initialize the van Genuchten model + void SetVGParamsSnow(VanGenuchten_ModelTypesSnow VGModelTypeSnow, K_Parameterizations K_PARAM, const bool& matrix, const bool& seaice); + void SetVGParamsSoil(); + bool enforceThermalEquilibrium(const bool fixTemp=true); + + double theta_r; //Soil property, residual water content. + double theta_s; //Soil property, saturation water content. + double alpha; //Soil property in Van Genuchten model. [m^-1] + double n; //Soil property in Van Genuchten model. 
+ double m; //Soil property in Van Genuchten model. + double h_e; //Soil property, air entry pressure, see Ippisch (2006) for details. + double Sc; //Saturation at cut-off point h_e (see Ippisch et al (2006)). + double ksat; //Soil property. Saturation hydraulic conductivity. + double field_capacity; //Soil property, grain size + bool defined; //true: the van Genuchten model has been initialized for this layer, false: the van Genuchten model is not initialized and should not be used. + + private: + void SetSoil(SoilTypes type); +}; +#endif // End of vanGenuchten.h}