diff --git a/peakshaving_analyzer/PSA.py b/peakshaving_analyzer/PSA.py index 5405021..fb7367a 100644 --- a/peakshaving_analyzer/PSA.py +++ b/peakshaving_analyzer/PSA.py @@ -151,6 +151,7 @@ def add_storage(self): "stored_energy": self.config.inverter_efficiency, }, hasCapacityVariable=True, + capacityMax=pd.Series([self.config.max_inverter_charge, 0], index=["consumption_site", "grid"]), investPerCapacity=0, linkedConversionCapacityID="storage", interestRate=self.config.interest_rate / 100, @@ -189,6 +190,7 @@ def add_storage(self): physicalUnit="kWh", commodityConversionFactors={"stored_energy": -1, "energy": 1}, hasCapacityVariable=True, + capacityMax=pd.Series([self.config.max_inverter_discharge, 0], index=["consumption_site", "grid"]), investPerCapacity=self.config.inverter_cost_per_kw, economicLifetime=self.config.inverter_lifetime, technicalLifetime=self.config.inverter_lifetime, diff --git a/peakshaving_analyzer/__init__.py b/peakshaving_analyzer/__init__.py index 0bf3b61..bca8fdc 100644 --- a/peakshaving_analyzer/__init__.py +++ b/peakshaving_analyzer/__init__.py @@ -2,10 +2,11 @@ from peakshaving_analyzer.input import Config, load_oeds_config, load_yaml_config from peakshaving_analyzer.output import Results from peakshaving_analyzer.PSA import PeakShavingAnalyzer +from peakshaving_analyzer.util import create_default_yaml """ PeakShaverAnalyzer package initialization. 
""" -__all__ = ["PeakShavingAnalyzer", "Config", "Results", "load_yaml_config", "load_oeds_config"] -__version__ = "0.1.8" +__all__ = ["PeakShavingAnalyzer", "Config", "Results", "load_yaml_config", "load_oeds_config", "create_default_yaml"] +__version__ = "0.1.9" diff --git a/peakshaving_analyzer/common.py b/peakshaving_analyzer/common.py index 11aa7f1..1bd7f18 100644 --- a/peakshaving_analyzer/common.py +++ b/peakshaving_analyzer/common.py @@ -35,13 +35,23 @@ def to_yaml(self, path: str | Path): with open(path, "w") as f: yaml.safe_dump(self.to_dict(include_timeseries=False), f, sort_keys=False) - def _plot(self, cols_to_plot: list[str] | None = None): + def _plot( + self, cols_to_plot: list[str] | None = None, xaxis_title: str | None = None, yaxis_title: str | None = None + ): ts_df = self.timeseries_to_df() + if "timestamp" in ts_df.columns: + x = ts_df["timestamp"] + elif "datetime" in ts_df.columns: + x = ts_df["datetime"] + else: + x = ts_df.index + if not cols_to_plot: cols_to_plot = ts_df.columns.tolist() - fig = px.line(ts_df, x=ts_df.index, y=cols_to_plot) + fig = px.line(ts_df, x=x, y=cols_to_plot) + fig.update_layout(xaxis_title=xaxis_title, yaxis_title=yaxis_title) fig.show() def plot_timeseries(self): diff --git a/peakshaving_analyzer/config.yml b/peakshaving_analyzer/config.yml index c016a14..668cf3b 100644 --- a/peakshaving_analyzer/config.yml +++ b/peakshaving_analyzer/config.yml @@ -65,6 +65,9 @@ inverter_cost_per_kw: 180 # storage inverter cost in euro per kw # taken from: # https://www.sciencedirect.com/science/article/pii/S1876610216310736 +max_inverter_charge_kw: # max charge power in kW +max_inverter_discharge_kw: # max discharge power in kW + ################################# # Existing PV system parameters # ################################# diff --git a/peakshaving_analyzer/input.py b/peakshaving_analyzer/input.py index 3e62d36..dcc1610 100644 --- a/peakshaving_analyzer/input.py +++ b/peakshaving_analyzer/input.py @@ -98,15 +98,15 @@ def 
load_yaml_config(config_file_path: Path | str, test_mode: bool = False) -> C # set config dir var data["config_dir"] = config_path.parent + _check_minimum_inputs(data) + # read in consumption timeseries - data["consumption_timeseries"] = pd.read_csv(data["config_dir"] / data["consumption_file_path"])[ - data["consumption_value_column"] - ] + data["consumption_timeseries"] = pd.read_csv(data["consumption_file_path"])[data["consumption_value_column"]] log.info("Consumption timeseries loaded") # read in timestamps if provided if data.get("timestamp_column"): - data["timestamps"] = pd.read_csv(data["config_dir"] / data["consumption_file_path"])[data["timestamp_column"]] + data["timestamps"] = pd.to_datetime(pd.read_csv(data["consumption_file_path"])[data["timestamp_column"]]) log.info("Timestamps loaded") else: data["timestamps"] = None @@ -241,9 +241,47 @@ def load_oeds_config( return Config(**data) +def _check_minimum_inputs(data): + if data.get("consumption_file_path") is None: + raise ValueError("Please provide a consumption file path!") + + if data.get("hours_per_timestep") is None: + raise ValueError("Please provide hours per timestep!") + + if data.get("producer_energy_price") is None and data.get("price_file_path") is None: + raise ValueError("Please provide either producer energy price or price timeseries!") + + if ( + data.get("pv_system_already_exists") + and data.get("existing_pv_file_path") is None + and (data.get("postal_code") is None or data.get("existing_pv_size_kwp") is None) + ): + msg = "When including an already existing PV system, you need to provide either the generation timeseries (existing pv file path)" + msg += " or your postal code and the existing PV system size in kWpeak!"
+ raise ValueError(msg) + + if ( + data.get("allow_additional_pv") + and not data.get("pv_system_already_exists") + and data.get("new_pv_file_path") is None + and data.get("postal_code") is None + ): + msg = "When including a new PV system (without an existing one), you need to provide either the generation timeseries (new pv file path)" + msg += " or your postal code!" + raise ValueError(msg) + + if data.get("grid_capacity_price") is None: + msg = "Please provide a grid capacity price. If you don't wish to model grid capacity price, set price to 0." + raise ValueError(msg) + + if data.get("grid_energy_price") is None: + msg = "Please provide a grid energy price. If you don't wish to model grid energy price, set price to 0." + raise ValueError(msg) + + def _create_timeseries_metadata(data): # if no timestamps are given, we create them - if not data.get("timestamps", None): + if data.get("timestamps", None) is None: data["n_timesteps"] = len(data["consumption_timeseries"]) data["leap_year"] = _detect_leap_year(data) data["assumed_year"] = _assume_year(data) @@ -256,8 +294,10 @@ def _create_timeseries_metadata(data): # otherwise we just create the metadata from the timestamps else: data["n_timesteps"] = len(data["timestamps"]) - data["leap_year"] = calendar.isleap(data["timestamps"][0].year) - data["assumed_year"] = data["timestamps"][0].year + + timestep_to_use = data["timestamps"][len(data["timestamps"]) // 2] + data["leap_year"] = calendar.isleap(timestep_to_use.year) + data["assumed_year"] = timestep_to_use.year def _detect_leap_year(data): @@ -360,7 +400,7 @@ def _read_price_timeseries(data): pd.Series: The price timeseries. 
""" log.info("Reading price timeseries from CSV file.") - df = pd.read_csv(data["config_dir"] / data["price_file_path"]) + df = pd.read_csv(data["price_file_path"]) df.rename( columns={data.get("price_value_column", "value"): "grid"}, inplace=True, @@ -384,9 +424,7 @@ def _load_pv_timeseries(data): if data.get("pv_system_already_exists"): # load from CSV if provided if data.get("existing_pv_file_path"): - pv_gen = pd.read_csv(data["config_dir"] / data["existing_pv_file_path"])[ - data.get("existing_pv_value_column", "value") - ] + pv_gen = pd.read_csv(data["existing_pv_file_path"])[data.get("existing_pv_value_column", "value")] pv_gen.rename("consumption_site", inplace=True) data["existing_pv_size_kwp"] = pv_gen.max() # set existing system size pv_gen = pv_gen / pv_gen.max() # scale to values from 0 to 1 @@ -412,9 +450,7 @@ def _load_pv_timeseries(data): if data.get("allow_additional_pv"): # load from csv if provided if data.get("new_pv_file_path"): - pv_gen = pd.read_csv(data["config_dir"] / data["new_pv_file_path"])[ - data.get("new_pv_value_column", "value") - ] + pv_gen = pd.read_csv(data["new_pv_file_path"])[data.get("new_pv_value_column", "value")] pv_gen.rename("consumption_site", inplace=True) log.info("existing pv generation timeseries loaded") diff --git a/peakshaving_analyzer/output.py b/peakshaving_analyzer/output.py index ac6a774..ee1b60a 100644 --- a/peakshaving_analyzer/output.py +++ b/peakshaving_analyzer/output.py @@ -4,6 +4,7 @@ import fine as fn import pandas as pd +import plotly.graph_objects as go import sqlalchemy from peakshaving_analyzer import Config @@ -97,8 +98,46 @@ def to_sql( df.to_sql(name=timeseries_table_name, schema=schema, con=connection, if_exists="append", index=False) def plot_storage_timeseries(self): - storage_columns = ["storage_charge_kw", "storage_discharge_kw", "storage_soc_kwh"] - self._plot(cols_to_plot=storage_columns) + fig = go.Figure() + + ts_df = self.timeseries_to_df() + if "timestamp" in ts_df.columns: + x = 
ts_df["timestamp"] + elif "datetime" in ts_df.columns: + x = ts_df["datetime"] + else: + x = ts_df.index + + fig.add_trace( + go.Scatter( + x=x, + y=ts_df["storage_charge_kw"], + name="Storage charge (kW)", + mode="lines", + ) + ) + fig.add_trace( + go.Scatter( + x=x, + y=ts_df["storage_discharge_kw"], + name="Storage discharge (kW)", + mode="lines", + ) + ) + + fig.add_trace(go.Scatter(x=x, y=ts_df["storage_soc_kwh"], name="Storage SOC (kWh)", mode="lines", yaxis="y2")) + + # Layout anpassen + fig.update_layout( + title="Charge / Discharge (kW) und SOC (kWh)", + yaxis=dict( + title="Charge / Discharge (kW)", + ), + yaxis2=dict(title="SOC (kWh)", overlaying="y", side="right"), + template="plotly_white", + ) + + fig.show() def plot_consumption_timeseries(self): consumption_columns = [ @@ -108,7 +147,7 @@ def plot_consumption_timeseries(self): "new_pv_generation_kw", "consumption_kw", ] - self._plot(cols_to_plot=consumption_columns) + self._plot(cols_to_plot=consumption_columns, yaxis_title="Power in kW") def create_results(config: Config, esm: fn.EnergySystemModel) -> Results: diff --git a/peakshaving_analyzer/util.py b/peakshaving_analyzer/util.py new file mode 100644 index 0000000..35852e6 --- /dev/null +++ b/peakshaving_analyzer/util.py @@ -0,0 +1,47 @@ +import yaml + + +def create_default_yaml(): + data = { + "name": "example_optimization", + "hours_per_timestep": 0.25, + "add_storage": True, + "allow_additional_pv": False, + "auto_opt": False, + "solver": "gurobi", + "verbose": True, + "timestamp_column": None, + "consumption_file_path": None, + "consumption_value_column": None, + "price_file_path": None, + "price_value_column": None, + "storage_lifetime": 15, + "storage_cost_per_kwh": 145, + "max_storage_size_kwh": None, + "storage_charge_efficiency": 0.9, + "storage_discharge_efficiency": 0.9, + "storage_charge_rate": 1, + "storage_discharge_rate": 1, + "inverter_efficiency": 0.95, + "inverter_lifetime": 15, + "inverter_cost_per_kw": 180, + 
"max_inverter_charge_kw": None, + "max_inverter_discharge_kw": None, + "pv_system_already_exists": False, + "existing_pv_file_path": None, + "existing_pv_column": None, + "existing_pv_size_kwp": None, + "postal_code": None, + "pv_system_lifetime": 30, + "pv_system_cost_per_kwp": 1250, + "pv_system_kwp_per_m2": 0.4, + "max_pv_system_size_kwp": None, + "new_pv_file_path": None, + "new_pv_value_column": None, + "overwrite_price_timeseries": True, + "producer_energy_price": 0.1665, + "grid_capacity_price": 101.22, + "grid_energy_price": 0.0460, + } + with open("config.yaml", "w") as f: + yaml.dump(data, f, sort_keys=False, default_flow_style=False) diff --git a/pyproject.toml b/pyproject.toml index e44ab28..3827b6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "peakshaving-analyzer" -version = "0.1.8" +version = "0.1.9" description = "Peak shaving analysis for industrial load profiles" authors = [ { name = "Christoph Komanns", email = "c.komanns@gmail.com" },