diff --git a/tests/test_scaler.py b/tests/test_scaler.py
new file mode 100644
index 0000000..989d11c
--- /dev/null
+++ b/tests/test_scaler.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+
+import logging
+import os
+import pathlib
+
+import pandas as pd
+from sklearn.preprocessing import StandardScaler
+
+from tot.benchmark import SimpleBenchmark
+from tot.datasets.dataset import Dataset
+from tot.models.models_neuralprophet import NeuralProphetModel
+
+log = logging.getLogger("tot.test")
+log.setLevel("WARNING")
+log.parent.setLevel("WARNING")
+
+DIR = pathlib.Path(__file__).parent.parent.absolute()
+DATA_DIR = os.path.join(DIR, "datasets")
+AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
+ERCOT_FILE = os.path.join(DATA_DIR, "ercot_load_reduced.csv")
+SAVE_DIR = os.path.join(DIR, "tests", "test-logs")
+if not os.path.isdir(SAVE_DIR):
+    os.makedirs(SAVE_DIR)
+
+try:
+    from prophet import Prophet
+
+    _prophet_installed = True
+except ImportError:
+    Prophet = None
+    _prophet_installed = False
+
+NROWS = 128
+EPOCHS = 2
+BATCH_SIZE = 64
+LR = 1.0
+ERCOT_REGIONS = ["NORTH", "EAST", "FAR_WEST"]
+
+PLOT = False
+
+
+def test_scaling_per_dataset():
+    ercot_df_aux = pd.read_csv(ERCOT_FILE)
+    ercot_df = pd.DataFrame()
+    for region in ERCOT_REGIONS:
+        ercot_df = pd.concat(
+            (
+                ercot_df,
+                ercot_df_aux[ercot_df_aux["ID"] == region].iloc[:NROWS].copy(deep=True),
+            ),
+            ignore_index=True,
+        )
+    air_passengers_df = pd.read_csv(AIR_FILE, nrows=NROWS)
+
+    dataset_list = [
+        Dataset(
+            df=air_passengers_df,
+            name="air_passengers",
+            freq="MS",
+            seasonality_mode="multiplicative",
+        ),
+        Dataset(
+            df=ercot_df,
+            name="ercot",
+            freq="H",
+        ),
+    ]
+    model_classes_and_params = [
+        (
+            NeuralProphetModel,
+            {
+                "scaler": StandardScaler(),
+                "scaling_level": "per_dataset",
+                "n_lags": 5,
+                "n_forecasts": 3,
+                "learning_rate": 0.1,
+                "normalize": "off",  # scaling is handled by the Scaler above
+            },
+        ),
+    ]
+    log.debug("{}".format(model_classes_and_params))
+
+    benchmark = SimpleBenchmark(
+        model_classes_and_params=model_classes_and_params,
+        datasets=dataset_list,
+        metrics=["MAE"],
+        test_percentage=0.25,
+    )
+    results_train, results_test = benchmark.run()
+
+    log.debug("{}".format(results_test))
+
+
+def test_scaling_per_time_series():
+    ercot_df_aux = pd.read_csv(ERCOT_FILE)
+    ercot_df = pd.DataFrame()
+    for region in ERCOT_REGIONS:
+        ercot_df = pd.concat(
+            (
+                ercot_df,
+                ercot_df_aux[ercot_df_aux["ID"] == region].iloc[:NROWS].copy(deep=True),
+            ),
+            ignore_index=True,
+        )
+    air_passengers_df = pd.read_csv(AIR_FILE, nrows=NROWS)
+
+    dataset_list = [
+        Dataset(
+            df=air_passengers_df,
+            name="air_passengers",
+            freq="MS",
+            seasonality_mode="multiplicative",
+        ),
+        Dataset(
+            df=ercot_df,
+            name="ercot",
+            freq="H",
+        ),
+    ]
+    model_classes_and_params = [
+        (
+            NeuralProphetModel,
+            {
+                "scaler": StandardScaler(),
+                "scaling_level": "per_time_series",
+                "n_lags": 5,
+                "n_forecasts": 3,
+                "learning_rate": 0.1,
+                "normalize": "off",  # scaling is handled by the Scaler above
+            },
+        ),
+    ]
+    log.debug("{}".format(model_classes_and_params))
+
+    benchmark = SimpleBenchmark(
+        model_classes_and_params=model_classes_and_params,
+        datasets=dataset_list,
+        metrics=["MAE"],
+        test_percentage=0.25,
+    )
+    results_train, results_test = benchmark.run()
+
+    log.debug("{}".format(results_test))
diff --git a/tot/data_processing/__init__.py b/tot/data_processing/__init__.py
new file mode 100644
index 0000000..5759fc6
--- /dev/null
+++ b/tot/data_processing/__init__.py
@@ -0,0 +1 @@
+from .scaler import Scaler  # noqa: F401
diff --git a/tot/data_processing/scaler.py b/tot/data_processing/scaler.py
new file mode 100644
index 0000000..33fe3cf
--- /dev/null
+++ b/tot/data_processing/scaler.py
@@ -0,0 +1,218 @@
+from dataclasses import dataclass
+from typing import Callable, Tuple
+
+import pandas as pd
+
+from tot.error_utils import raise_if
+
+
+def _pivot(df, col_name):
+    return df.pivot(index="ds", columns="ID", values=col_name).rename_axis(columns=None).reset_index()
+
+
+def _melt(df, IDs, col_name):
+    return pd.melt(df, id_vars="ds", value_vars=IDs, var_name="ID", value_name=col_name)
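+
+
+# Illustration of the long/wide round-trip performed by the two helpers above
+# (IDs and values are made up):
+#
+#   long format (input)             wide format (after _pivot)
+#   ds          ID     y            ds          EAST  NORTH
+#   2021-01-01  EAST   1.0   <->    2021-01-01  1.0   2.0
+#   2021-01-01  NORTH  2.0
+#
+# _melt restores the long format after the transformer has been applied to the
+# wide value columns.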
+
+
+SCALING_LEVELS = ["per_dataset", "per_time_series"]
+
+
+@dataclass
+class Scaler:
+    """
+    A scaling module that performs `transform` and `inverse_transform` operations on time series data. It supports
+    the transformers from the `sklearn.preprocessing` package, as well as any other scaler implementing the `fit`,
+    `transform` and `inverse_transform` methods. See: https://scikit-learn.org/stable/modules/preprocessing.html
+    `scaling_level` selects global scaling ("per_dataset") or local scaling ("per_time_series").
+
+    Examples
+    --------
+    >>> from sklearn.preprocessing import StandardScaler
+    >>> scaler = Scaler(transformer=StandardScaler(), scaling_level="per_dataset")
+    >>> df_train, df_test = scaler.transform(df_train, df_test)
+    >>> fcst_train, fcst_test = scaler.inverse_transform(fcst_train, fcst_test)
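+
+    To scale every time series independently instead, use the local scaling level (the dataframes must then
+    contain an ``ID`` column):
+
+    >>> scaler = Scaler(transformer=StandardScaler(), scaling_level="per_time_series")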
+    """
+
+    transformer: object
+    scaling_level: str
+
+    def __post_init__(self):
+        is_transformer_valid = (
+            callable(getattr(self.transformer, "fit", None))
+            and callable(getattr(self.transformer, "transform", None))
+            and callable(getattr(self.transformer, "inverse_transform", None))
+        )
+        raise_if(
+            not is_transformer_valid,
+            "Transformer provided to the Scaler must implement fit, transform and inverse_transform methods",
+        )
+        raise_if(
+            self.scaling_level not in SCALING_LEVELS,
+            "Invalid scaling level. Available levels: `per_dataset`, `per_time_series`",
+        )
+
+    def _scale_per_series(self, df: pd.DataFrame, fit: bool = False) -> pd.DataFrame:
+        """
+        Applies `transform` per series. Fits the `transformer` if `fit` is set to True. First, the dataframe is
+        pivoted so that the unique `ID`s become columns, then the transformation is applied to the values. The data
+        is returned in its original format.
+
+        Parameters:
+        -----------
+        df : pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, and optionally ``ID`` with data
+        fit : bool
+            if set to True, the Scaler is fitted on the data from `df`
+
+        Returns:
+        --------
+        pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, and optionally ``ID`` with transformed data
+        """
+        IDs = df["ID"].unique()
+        df_pivot = _pivot(df, "y")
+
+        if fit:
+            self.transformer.fit(df_pivot[IDs])
+        df_pivot[IDs] = self.transformer.transform(df_pivot[IDs])
+
+        return _melt(df_pivot, IDs, "y")
+
+    def _scale(self, df: pd.DataFrame, fit: bool = False) -> pd.DataFrame:
+        """
+        Applies `transform` to the `y` column of `df`. Fits the `transformer` if `fit` is set to True.
+
+        Parameters:
+        -----------
+        df : pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, and optionally ``ID`` with data
+        fit : bool
+            if set to True, the Scaler is fitted on the data from `df`
+
+        Returns:
+        --------
+        pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, and optionally ``ID`` with transformed data
+        """
+        if fit:
+            self.transformer.fit(df["y"].values.reshape(-1, 1))
+        df["y"] = self.transformer.transform(df["y"].values.reshape(-1, 1))
+        return df
+
+    def _rescale_per_series(self, df: pd.DataFrame, col_name: str) -> pd.DataFrame:
+        """
+        Applies `inverse_transform` per series. First, the dataframe is pivoted so that the unique `ID`s become
+        columns, then the inverse transformation is applied. The data is returned in its original format.
+
+        Parameters:
+        -----------
+        df : pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with data
+        col_name : str
+            name of the column on which the operation is applied
+
+        Returns:
+        --------
+        pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with rescaled data
+        """
+        IDs = df["ID"].unique()
+        df_pivot = _pivot(df, col_name)
+
+        df_pivot[IDs] = self.transformer.inverse_transform(df_pivot[IDs])
+
+        return _melt(df_pivot, IDs, col_name)
+
+    def _rescale(self, df: pd.DataFrame, col_name: str) -> pd.DataFrame:
+        """
+        Applies `inverse_transform` to column `col_name` of `df`.
+
+        Parameters:
+        -----------
+        df : pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with data
+        col_name : str
+            name of the column on which the operation is applied
+
+        Returns:
+        --------
+        pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with rescaled data
+        """
+        df[col_name] = self.transformer.inverse_transform(df[col_name].values.reshape(-1, 1))
+        return df
+
+    def _inverse_transform(self, df: pd.DataFrame, rescale_method: Callable) -> pd.DataFrame:
+        """
+        Applies rescaling to `df`. First, the `y` column is rescaled to create the main df. Then, the operation is
+        repeated for all `yhat` columns (e.g. ``yhat1``, ``yhat2``, ...) and the results are updated in the main df.
+        The proper rescale implementation is chosen based on the scaling level.
+
+        Parameters:
+        -----------
+        df : pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with data
+        rescale_method : Callable
+            rescale implementation matching the scaling level, either `_rescale` or `_rescale_per_series`
+
+        Returns:
+        --------
+        pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with rescaled data
+        """
+        result = rescale_method(df, "y")
+
+        yhats = [col for col in df.columns if "yhat" in col]
+        for yhat in yhats:
+            result[yhat] = rescale_method(df, yhat)[yhat]
+
+        return result
+
+    def transform(self, df_train: pd.DataFrame, df_test: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
+        """
+        Applies `transform` to the dataframes. The transformer is fit on `df_train` only.
+
+        Parameters:
+        -----------
+        df_train : pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, and optionally ``ID`` with train data
+        df_test : pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, and optionally ``ID`` with test data
+
+        Returns:
+        --------
+        pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, and optionally ``ID`` with scaled train data
+        pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, and optionally ``ID`` with scaled test data
+        """
+        df_train = df_train.copy()
+        df_test = df_test.copy()
+        if self.scaling_level == "per_time_series":
+            return self._scale_per_series(df_train, fit=True), self._scale_per_series(df_test)
+
+        return self._scale(df_train, fit=True), self._scale(df_test)
+
+    def inverse_transform(self, df_train: pd.DataFrame, df_test: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
+        """
+        Applies `inverse_transform` on the dataframes.
+
+        Parameters:
+        -----------
+        df_train : pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with train results
+        df_test : pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with test results
+
+        Returns:
+        --------
+        pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with rescaled train results
+        pd.DataFrame
+            dataframe containing columns ``ds``, ``y``, [``yhat``], and optionally ``ID`` with rescaled test results
+        """
+        df_train = df_train.copy()
+        df_test = df_test.copy()
+        if self.scaling_level == "per_time_series":
+            rescale_method = self._rescale_per_series
+        else:
+            rescale_method = self._rescale
+        return self._inverse_transform(df_train, rescale_method), self._inverse_transform(df_test, rescale_method)
diff --git a/tot/experiment.py b/tot/experiment.py
index b02194c..c0a2d50 100644
--- a/tot/experiment.py
+++ b/tot/experiment.py
@@ -9,6 +9,7 @@ import pandas as pd
 from neuralprophet import set_random_seed
 
+from tot.data_processing.scaler import Scaler
 from tot.datasets.dataset import Dataset
 from tot.df_utils import (
     check_dataframe,
@@ -42,6 +43,7 @@ class Experiment(ABC):
     metadata: Optional[dict] = None
     save_dir: Optional[str] = None
     num_processes: int = 1
+    scaler: Optional[Scaler] = None
 
     def __post_init__(self):
         data_params = {}
@@ -75,6 +77,11 @@ def __post_init__(self):
             "experiment": self.experiment_name,
         }
 
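+        # "scaler" and "scaling_level" are tot-level options rather than model
+        # parameters: pop them from the params dict so they are not forwarded
+        # to the model. "scaling_level" defaults to "per_dataset", i.e. a
+        # single transformer fit on the entire dataset.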
+        scaler = self.params.pop("scaler", None)
+        if scaler is not None:
+            scaling_level = self.params.pop("scaling_level", "per_dataset")
+            self.scaler = Scaler(transformer=scaler, scaling_level=scaling_level)
+
     def write_results_to_csv(self, df, prefix, current_fold=None):
         """
         Write evaluation results to a CSV file.
@@ -224,6 +231,10 @@ def run(self):
             test_percentage=self.test_percentage,
             local_split=False,
         )
+
+        if self.scaler is not None:
+            # the transformer is fit on the train split only
+            df_train, df_test = self.scaler.transform(df_train, df_test)
+
         # fit model
         model = self.model_class(self.params)
         model.fit(df=df_train, freq=self.data.freq)
@@ -234,6 +245,10 @@ def run(self):
             df_test=df_test,
             received_single_time_series=received_single_time_series,
         )
+
+        if self.scaler is not None:
+            # bring the forecasts back to the original scale before evaluation
+            fcst_train, fcst_test = self.scaler.inverse_transform(fcst_train, fcst_test)
+
         # data-specific post-processing
         fcst_train, df_train = maybe_drop_added_dates(fcst_train, df_train)
         fcst_test, df_test = maybe_drop_added_dates(fcst_test, df_test)
diff --git a/tutorials/how_to_create_custom_evaluation_pipeline.ipynb b/tutorials/how_to_create_custom_evaluation_pipeline.ipynb
index 2f6024e..527f60a 100644
--- a/tutorials/how_to_create_custom_evaluation_pipeline.ipynb
+++ b/tutorials/how_to_create_custom_evaluation_pipeline.ipynb
@@ -2,27 +2,24 @@
  "cells": [
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ourownstory/neural_prophet/blob/master/tutorials/UnderstandeTheBenchmarkingPipeline.ipynb)"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "# How to create a custom evaluation pipeline\n",
     "This tutorial takes you behind the scenes of the benchmark template and guides you in creating your custom evaluation\n",
     "pipeline by explaining the processing step-by-step.\n"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 1,
+   "metadata": {},
    "outputs": [],
    "source": [
     "if 'google.colab' in str(get_ipython()):\n",
@@ -30,47 +27,42 @@
     " #!pip install test-of-time # much faster, but may not have the latest upgrades/bugfixes\n",
     "\n",
     "import pandas as pd\n",
+    "from sklearn.preprocessing import StandardScaler\n",
     "from neuralprophet import set_log_level, set_random_seed\n",
+    "\n",
     "from tot.df_utils import _check_min_df_len, prep_or_copy_df, check_dataframe, handle_missing_data, split_df, return_df_in_original_format, maybe_drop_added_dates\n",
-    "from tot.exp_utils import evaluate_forecast\n",
-    "from tot.models_neuralprophet import NeuralProphetModel"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+    "from tot.models.models_neuralprophet import NeuralProphetModel\n",
+    "from tot.data_processing.scaler import Scaler\n",
+    "from tot.evaluation.metric_utils import calculate_averaged_metrics_per_experiment"
+   ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 2,
+   "metadata": {},
    "outputs": [],
    "source": [
     "set_log_level(\"ERROR\")"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [],
-   "metadata": {
-    "collapsed": false
-   }
+   "metadata": {},
+   "source": []
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "The benchmark templates test-of-time offers are a quick and simple way to compare multiple models and datasets.\n",
     "Defining a benchmark and running it will trigger a pipeline that returns the benchmark results. The benchmark\n",
     "is sub-divided into multiple experiments, that are executed in the pipeline. Every experiment run follows the\n",
     "same evaluation steps. Let's have a closer look at the individual steps."
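+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the transformer is fit on the train split only and merely applied to the test split, so no information from the test data leaks into the scaling. For a dataset with multiple time series, `scaling_level=\"per_time_series\"` can be used instead; a `StandardScaler` then computes separate statistics per `ID`."
+   ]
+  },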
- ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Evaluation processing steps\n", "1. Data-specific pre-processing:\n", @@ -79,6 +71,7 @@ " - Performs a basic sanity check\n", " - Handles missing data\n", " - Splits the data into train and test datasets\n", + " - [optional] Data transformation (standarization, scaling, etc)\n", "\n", "2. Model definition\n", " - Set random seed to ensure reproducibility\n", @@ -99,55 +92,46 @@ "\n", "7. Data-specific post-processing:\n", " - Drops any added dates\n", + " - [optional] Data inverse transformation\n", "\n", "8. Evaluation:\n", " - Evaluates the forecasts based on selected error metrics" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", - "source": [], - "metadata": { - "collapsed": false - } + "metadata": {}, + "source": [] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Load data\n", "Let's load the AirPassenger dataset as an example dataset to walk through the pipeline step by step." - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 3, + "metadata": {}, "outputs": [], "source": [ "data_location = \"https://raw.githubusercontent.com/ourownstory/neuralprophet-data/main/datasets/\"\n", "df_air = pd.read_csv(data_location + 'air_passengers.csv')" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### 1. Data-specific pre-processing" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 4, + "metadata": {}, "outputs": [], "source": [ "# prep_or_copy_df() ensures that the df has an \"ID\" column to be usable in the further process\n", @@ -161,23 +145,36 @@ " df=df_air,\n", " test_percentage=0.40,\n", ")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### [optional] Data transformation" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "scaler = Scaler(transformer=StandardScaler(), scaling_level=\"per_dataset\")\n", + "df_air_train, df_air_test = scaler.transform(df_air_train, df_air_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, "source": [ "### 2. Model definition" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 6, + "metadata": {}, "outputs": [], "source": [ "set_random_seed(42)\n", @@ -188,175 +185,167 @@ " \"seasonality_mode\": \"multiplicative\",\n", " \"learning_rate\": 0.03,\n", " \"_data_params\":{},\n", + " \"normalize\": \"off\", # normalization should be disabled when normalizing data in the pre-processing step\n", "}\n", "model=model_class(params=params)" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### 3. 
+  {
+   "cell_type": "markdown",
+   "metadata": {},
   "source": [
    "### 2. Model definition"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 6,
+   "metadata": {},
   "outputs": [],
   "source": [
    "set_random_seed(42)\n",
@@ -188,175 +185,167 @@
    "    \"seasonality_mode\": \"multiplicative\",\n",
    "    \"learning_rate\": 0.03,\n",
    "    \"_data_params\":{},\n",
+    "    \"normalize\": \"off\", # internal normalization should be disabled when the data is already scaled in the pre-processing step\n",
    "}\n",
    "model=model_class(params=params)"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
  },
  {
   "cell_type": "markdown",
+   "metadata": {},
   "source": [
    "### 3. Model-specific data pre-processing"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 7,
+   "metadata": {},
   "outputs": [],
   "source": [
    "# check if train and test df contain enough samples\n",
    "_check_min_df_len(df=df_air_train, min_len=model.n_forecasts + model.n_lags)\n",
    "_check_min_df_len(df=df_air_test, min_len=model.n_forecasts)\n",
    "# extend the test df with historic values from the train df\n",
-   "df_air_test = model.maybe_extend_df(df_air_train, df_air_test)"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   "df_air_test_extended = model.maybe_extend_df(df_air_train, df_air_test)"
+   ]
  },
  {
   "cell_type": "markdown",
+   "metadata": {},
   "source": [
    "### 4. Fit model"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 8,
+   "metadata": {},
   "outputs": [],
   "source": [
    "model.model.fit(df=df_air_train, freq='MS', progress=\"none\", minimal=True)"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
  },
  {
   "cell_type": "markdown",
+   "metadata": {},
   "source": [
    "### 5. Predict model"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": 9,
+   "metadata": {},
   "outputs": [],
   "source": [
    "# the model-individual predict function outputs the forecasts as a df\n",
    "fcst_train = model.model.predict(df=df_air_train)\n",
-   "fcst_test = model.model.predict(df=df_air_test)"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   "fcst_test = model.model.predict(df=df_air_test_extended)"
+   ]
  },
  {
   "cell_type": "markdown",
+   "metadata": {},
   "source": [
    "### 6. Model-specific post-processing:"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 10,
+   "metadata": {},
   "outputs": [],
   "source": [
    "# As you can see, the method is a class method and hence linked to the model\n",
-   "fcst_test, df_air_test = model.maybe_drop_added_values_from_df(fcst_test, df_air_test)"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   "fcst_test = model.maybe_drop_added_values_from_df(fcst_test, df_air_test_extended)"
+   ]
  },
  {
   "cell_type": "markdown",
+   "metadata": {},
   "source": [
    "### 7. Data-specific data post-processing:"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": 11,
+   "metadata": {},
   "outputs": [],
   "source": [
-   "# in case an 'ID' column was previously added, return_df_in_original_format() will remove it again\n",
-   "fcst_train_df = return_df_in_original_format(fcst_train, received_ID_col, received_single_time_series)\n",
-   "fcst_test_df = return_df_in_original_format(fcst_test, received_ID_col, received_single_time_series)\n",
    "# in case, missing data was imputed maybe_drop_added_dates() removes it again\n",
-   "fcst_train_df, df_air_train = maybe_drop_added_dates(fcst_train_df, df_air_train)\n",
-   "fcst_test_df, df_air_test = maybe_drop_added_dates(fcst_test_df, df_air_test)"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   "fcst_train, df_air_train = maybe_drop_added_dates(fcst_train, df_air_train)\n",
+   "fcst_test, df_air_test = maybe_drop_added_dates(fcst_test, df_air_test)"
+   ]
  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### [optional] Data inverse transformation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fcst_train, fcst_test = scaler.inverse_transform(fcst_train, fcst_test)"
+   ]
+  },
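+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The forecasts are now back on the original scale, so the error metrics in the final step are reported in the units of the raw data."
+   ]
+  },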
  {
   "cell_type": "markdown",
+   "metadata": {},
   "source": [
    "### 8. Evaluation:"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 13,
+   "metadata": {},
   "outputs": [
    {
-    "name": "stdout",
-    "output_type": "stream",
-    "text": [
-     "{'MAPE': 8.895154, 'MAE': 37.84229, 'RMSE': 46.43487}\n"
-    ]
+    "data": {
+     "text/plain": [
+      "{'MAPE': 5.789534623424212}"
+     ]
+    },
+    "execution_count": 13,
+    "metadata": {},
+    "output_type": "execute_result"
    }
   ],
   "source": [
-   "# evaluate_forecast() computes the selected error metrics\n",
-   "result_train, result_test = evaluate_forecast(fcst_train_df, fcst_test_df, metrics=['MAPE','MAE','RMSE'], metadata=None)\n",
-   "print(result_test)"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   "result_train = calculate_averaged_metrics_per_experiment(\n",
+   "    fcst_df=fcst_train, df_historic=fcst_train, metrics=[\"MAPE\"], metadata={}, freq=\"MS\")\n",
+   "result_test = calculate_averaged_metrics_per_experiment(\n",
+   "    fcst_df=fcst_test, df_historic=fcst_train, metrics=[\"MAPE\"], metadata={}, freq=\"MS\")\n",
+   "result_test"
+   ]
  }
 ],
 "metadata": {
  "kernelspec": {
-  "display_name": "Python 3",
+  "display_name": "tot",
   "language": "python",
-  "name": "python3"
+  "name": "tot"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
-   "version": 2
+   "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
-  "pygments_lexer": "ipython2",
-  "version": "2.7.6"
+  "pygments_lexer": "ipython3",
+  "version": "3.8.5"
  }
 },
 "nbformat": 4,
-"nbformat_minor": 0
+"nbformat_minor": 1
}