Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions peakshaving_analyzer/PSA.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,7 @@ def add_storage(self):
"stored_energy": self.config.inverter_efficiency,
},
hasCapacityVariable=True,
capacityMax=pd.Series([self.config.max_inverter_charge, 0], index=["consumption_site", "grid"]),
investPerCapacity=0,
linkedConversionCapacityID="storage",
interestRate=self.config.interest_rate / 100,
Expand Down Expand Up @@ -189,6 +190,7 @@ def add_storage(self):
physicalUnit="kWh",
commodityConversionFactors={"stored_energy": -1, "energy": 1},
hasCapacityVariable=True,
capacityMax=pd.Series([self.config.max_inverter_discharge, 0], index=["consumption_site", "grid"]),
investPerCapacity=self.config.inverter_cost_per_kw,
economicLifetime=self.config.inverter_lifetime,
technicalLifetime=self.config.inverter_lifetime,
Expand Down
5 changes: 3 additions & 2 deletions peakshaving_analyzer/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@
from peakshaving_analyzer.input import Config, load_oeds_config, load_yaml_config
from peakshaving_analyzer.output import Results
from peakshaving_analyzer.PSA import PeakShavingAnalyzer
from peakshaving_analyzer.util import create_default_yaml

"""
PeakShavingAnalyzer package initialization.
"""

__all__ = ["PeakShavingAnalyzer", "Config", "Results", "load_yaml_config", "load_oeds_config"]
__version__ = "0.1.8"
__all__ = ["PeakShavingAnalyzer", "Config", "Results", "load_yaml_config", "load_oeds_config", "create_default_yaml"]
__version__ = "0.1.9"
14 changes: 12 additions & 2 deletions peakshaving_analyzer/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,13 +35,23 @@ def to_yaml(self, path: str | Path):
with open(path, "w") as f:
yaml.safe_dump(self.to_dict(include_timeseries=False), f, sort_keys=False)

def _plot(
    self, cols_to_plot: list[str] | None = None, xaxis_title: str | None = None, yaxis_title: str | None = None
):
    """Plot timeseries columns of this object as a line chart.

    Args:
        cols_to_plot: Column names to plot; defaults to all columns of the
            timeseries dataframe.
        xaxis_title: Optional label for the x-axis.
        yaxis_title: Optional label for the y-axis.
    """
    ts_df = self.timeseries_to_df()

    # Prefer an explicit time column for the x-axis; fall back to the index.
    if "timestamp" in ts_df.columns:
        x = ts_df["timestamp"]
    elif "datetime" in ts_df.columns:
        x = ts_df["datetime"]
    else:
        x = ts_df.index

    if not cols_to_plot:
        cols_to_plot = ts_df.columns.tolist()

    fig = px.line(ts_df, x=x, y=cols_to_plot)
    fig.update_layout(xaxis_title=xaxis_title, yaxis_title=yaxis_title)
    fig.show()

def plot_timeseries(self):
Expand Down
3 changes: 3 additions & 0 deletions peakshaving_analyzer/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,9 @@ inverter_cost_per_kw: 180 # storage inverter cost in euro per kw
# taken from:
# https://www.sciencedirect.com/science/article/pii/S1876610216310736

max_inverter_charge_kw: # max charge power in kW
max_inverter_discharge_kw: # max discharge power in kW

#################################
# Existing PV system parameters #
#################################
Expand Down
64 changes: 50 additions & 14 deletions peakshaving_analyzer/input.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,15 +98,15 @@ def load_yaml_config(config_file_path: Path | str, test_mode: bool = False) -> C
# set config dir var
data["config_dir"] = config_path.parent

_check_minimum_inputs(data)

# read in consumption timeseries
data["consumption_timeseries"] = pd.read_csv(data["config_dir"] / data["consumption_file_path"])[
data["consumption_value_column"]
]
data["consumption_timeseries"] = pd.read_csv(data["consumption_file_path"])[data["consumption_value_column"]]
log.info("Consumption timeseries loaded")

# read in timestamps if provided
if data.get("timestamp_column"):
data["timestamps"] = pd.read_csv(data["config_dir"] / data["consumption_file_path"])[data["timestamp_column"]]
data["timestamps"] = pd.to_datetime(pd.read_csv(data["consumption_file_path"])[data["timestamp_column"]])
log.info("Timestamps loaded")
else:
data["timestamps"] = None
Expand Down Expand Up @@ -241,9 +241,47 @@ def load_oeds_config(
return Config(**data)


def _check_minimum_inputs(data):
if data.get("consumption_file_path") is None:
raise ValueError("Please provide a consumption file path!")

if data.get("hours_per_timestep") is None:
raise ValueError("Please provide hours per timestep!")

if data.get("producer_energy_price") is None and data.get("price_file_path") is None:
raise ValueError("Please provide either producer energy price or price timeseries!")

if (
data.get("pv_system_already_exists")
and data.get("existing_pv_file_path") is None
and (data.get("postal_code") is None or data.get("existing_pv_size_kwp") is None)
):
msg = "When including already existing PV system, you need to provide either the generation timeseries (existing pv file path"
msg += " or your postal code and the existing PV system size in kWpeak!"
raise ValueError(msg)

if (
data.get("allow_additional_pv")
and not data.get("pv_system_already_exists")
and data.get("new_pv_file_path") is None
and data.get("postal_code") is None
):
msg = "When including a new PV system (without an existing one), you need to provide either the generation timeseries (new pv file path)"
msg += " or your postal code!"
raise ValueError(msg)

if data.get("grid_capacity_price") is None:
msg = "Please provide a grid capacity price. If you don't wish to model grid capacity price, set price to 0."
raise ValueError(msg)

if data.get("grid_energy_price") is None:
msg = "Please provide a grid energy price. If you don't wish to model grid energy price, set price to 0."
raise ValueError(msg)


def _create_timeseries_metadata(data):
# if no timestamps are given, we create them
if not data.get("timestamps", None):
if data.get("timestamps", None) is None:
data["n_timesteps"] = len(data["consumption_timeseries"])
data["leap_year"] = _detect_leap_year(data)
data["assumed_year"] = _assume_year(data)
Expand All @@ -256,8 +294,10 @@ def _create_timeseries_metadata(data):
# otherwise we just create the metadata from the timestamps
else:
data["n_timesteps"] = len(data["timestamps"])
data["leap_year"] = calendar.isleap(data["timestamps"][0].year)
data["assumed_year"] = data["timestamps"][0].year

timestep_to_use = data["timestamps"][len(data["timestamps"]) // 2]
data["leap_year"] = calendar.isleap(timestep_to_use.year)
data["assumed_year"] = timestep_to_use.year


def _detect_leap_year(data):
Expand Down Expand Up @@ -360,7 +400,7 @@ def _read_price_timeseries(data):
pd.Series: The price timeseries.
"""
log.info("Reading price timeseries from CSV file.")
df = pd.read_csv(data["config_dir"] / data["price_file_path"])
df = pd.read_csv(data["price_file_path"])
df.rename(
columns={data.get("price_value_column", "value"): "grid"},
inplace=True,
Expand All @@ -384,9 +424,7 @@ def _load_pv_timeseries(data):
if data.get("pv_system_already_exists"):
# load from CSV if provided
if data.get("existing_pv_file_path"):
pv_gen = pd.read_csv(data["config_dir"] / data["existing_pv_file_path"])[
data.get("existing_pv_value_column", "value")
]
pv_gen = pd.read_csv(data["existing_pv_file_path"])[data.get("existing_pv_value_column", "value")]
pv_gen.rename("consumption_site", inplace=True)
data["existing_pv_size_kwp"] = pv_gen.max() # set existing system size
pv_gen = pv_gen / pv_gen.max() # scale to values from 0 to 1
Expand All @@ -412,9 +450,7 @@ def _load_pv_timeseries(data):
if data.get("allow_additional_pv"):
# load from csv if provided
if data.get("new_pv_file_path"):
pv_gen = pd.read_csv(data["config_dir"] / data["new_pv_file_path"])[
data.get("new_pv_value_column", "value")
]
pv_gen = pd.read_csv(data["new_pv_file_path"])[data.get("new_pv_value_column", "value")]
pv_gen.rename("consumption_site", inplace=True)
log.info("existing pv generation timeseries loaded")

Expand Down
45 changes: 42 additions & 3 deletions peakshaving_analyzer/output.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

import fine as fn
import pandas as pd
import plotly.graph_objects as go
import sqlalchemy

from peakshaving_analyzer import Config
Expand Down Expand Up @@ -97,8 +98,46 @@ def to_sql(
df.to_sql(name=timeseries_table_name, schema=schema, con=connection, if_exists="append", index=False)

def plot_storage_timeseries(self):
    """Plot storage charge/discharge power and state of charge.

    Charge and discharge (kW) share the left y-axis; the state of charge
    (kWh) is drawn against a secondary y-axis on the right.
    """
    fig = go.Figure()

    ts_df = self.timeseries_to_df()

    # Prefer an explicit time column for the x-axis; fall back to the index.
    if "timestamp" in ts_df.columns:
        x = ts_df["timestamp"]
    elif "datetime" in ts_df.columns:
        x = ts_df["datetime"]
    else:
        x = ts_df.index

    fig.add_trace(
        go.Scatter(
            x=x,
            y=ts_df["storage_charge_kw"],
            name="Storage charge (kW)",
            mode="lines",
        )
    )
    fig.add_trace(
        go.Scatter(
            x=x,
            y=ts_df["storage_discharge_kw"],
            name="Storage discharge (kW)",
            mode="lines",
        )
    )

    # SOC uses the secondary axis so its kWh scale does not flatten the kW traces.
    fig.add_trace(go.Scatter(x=x, y=ts_df["storage_soc_kwh"], name="Storage SOC (kWh)", mode="lines", yaxis="y2"))

    # Adjust layout: attach the SOC trace to a right-hand secondary y-axis.
    fig.update_layout(
        title="Charge / Discharge (kW) and SOC (kWh)",
        yaxis=dict(
            title="Charge / Discharge (kW)",
        ),
        yaxis2=dict(title="SOC (kWh)", overlaying="y", side="right"),
        template="plotly_white",
    )

    fig.show()

def plot_consumption_timeseries(self):
consumption_columns = [
Expand All @@ -108,7 +147,7 @@ def plot_consumption_timeseries(self):
"new_pv_generation_kw",
"consumption_kw",
]
self._plot(cols_to_plot=consumption_columns)
self._plot(cols_to_plot=consumption_columns, yaxis_title="Power in kW")


def create_results(config: Config, esm: fn.EnergySystemModel) -> Results:
Expand Down
47 changes: 47 additions & 0 deletions peakshaving_analyzer/util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import yaml


def create_default_yaml(path: str = "config.yaml") -> None:
    """Write a default configuration YAML file.

    The file contains every supported configuration key with a sensible
    default value, or ``None`` where the user must supply a value before
    running an optimization.

    Args:
        path: Destination file path. Defaults to ``config.yaml`` in the
            current working directory (backward compatible with the
            parameterless call).
    """
    data = {
        "name": "example_optimization",
        "hours_per_timestep": 0.25,
        "add_storage": True,
        "allow_additional_pv": False,
        "auto_opt": False,
        "solver": "gurobi",
        "verbose": True,
        "timestamp_column": None,
        "consumption_file_path": None,
        "consumption_value_column": None,
        "price_file_path": None,
        "price_value_column": None,
        "storage_lifetime": 15,
        "storage_cost_per_kwh": 145,
        "max_storage_size_kwh": None,
        "storage_charge_efficiency": 0.9,
        "storage_discharge_efficiency": 0.9,
        "storage_charge_rate": 1,
        "storage_discharge_rate": 1,
        "inverter_efficiency": 0.95,
        "inverter_lifetime": 15,
        "inverter_cost_per_kw": 180,
        "max_inverter_charge_kw": None,
        "max_inverter_discharge_kw": None,
        "pv_system_already_exists": False,
        "existing_pv_file_path": None,
        "existing_pv_column": None,
        "existing_pv_size_kwp": None,
        "postal_code": None,
        "pv_system_lifetime": 30,
        "pv_system_cost_per_kwp": 1250,
        "pv_system_kwp_per_m2": 0.4,
        "max_pv_system_size_kwp": None,
        "new_pv_file_path": None,
        "new_pv_value_column": None,
        "overwrite_price_timeseries": True,
        "producer_energy_price": 0.1665,
        "grid_capacity_price": 101.22,
        "grid_energy_price": 0.0460,
    }
    with open(path, "w") as f:
        yaml.dump(data, f, sort_keys=False, default_flow_style=False)
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "peakshaving-analyzer"
version = "0.1.8"
version = "0.1.9"
description = "Peak shaving analysis for industrial load profiles"
authors = [
{ name = "Christoph Komanns", email = "[email protected]" },
Expand Down