Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35,648 changes: 35,648 additions & 0 deletions tools/visualize-icenet-forecast/2021_07_01_183913_forecast_results.csv

Large diffs are not rendered by default.

89 changes: 89 additions & 0 deletions tools/visualize-icenet-forecast/config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
"""
Code taken from https://github.com/tom-andersson/icenet-paper and slightly adjusted
to fit the galaxy interface.
"""

import os
import pandas as pd
'''
Defines globals used throughout the codebase.
'''

###############################################################################
# Folder structure naming system
###############################################################################

data_folder = 'data'
obs_data_folder = os.path.join(data_folder, 'obs')
cmip6_data_folder = os.path.join(data_folder, 'cmip6')
mask_data_folder = os.path.join(data_folder, 'masks')
forecast_data_folder = os.path.join(data_folder, 'forecasts')
network_dataset_folder = os.path.join(data_folder, 'network_datasets')

dataloader_config_folder = 'dataloader_configs'

networks_folder = 'trained_networks'

results_folder = 'results'
forecast_results_folder = os.path.join(results_folder, 'forecast_results')
permute_and_predict_results_folder = os.path.join(results_folder, 'permute_and_predict_results')
uncertainty_results_folder = os.path.join(results_folder, 'uncertainty_results')

figure_folder = 'figures'

video_folder = 'videos'

active_grid_cell_file_format = 'active_grid_cell_mask_{}.npy'
land_mask_filename = 'land_mask.npy'
region_mask_filename = 'region_mask.npy'

###############################################################################
# Polar hole/missing months
###############################################################################

# Pre-defined polar hole radii (in number of 25km x 25km grid cells)
# The polar hole radii were determined from Sections 2.1, 2.2, and 2.3 of
# http://osisaf.met.no/docs/osisaf_cdop3_ss2_pum_sea-ice-conc-climate-data-record_v2p0.pdf
polarhole1_radius = 28
polarhole2_radius = 11
polarhole3_radius = 3

# Whether or not to mask out the 3rd polar hole mask from
# Nov 2005 to Dec 2015 with a radius of only 3 grid cells. Including it creates
# some complications when analysing performance on a validation set that
# overlaps with the 3rd polar hole period.
use_polarhole3 = False

polarhole1_fname = 'polarhole1_mask.npy'
polarhole2_fname = 'polarhole2_mask.npy'
polarhole3_fname = 'polarhole3_mask.npy'

# Final month that each of the polar holes apply
# NOTE: 1st of the month chosen arbitrarily throughout as always working wit
# monthly averages
polarhole1_final_date = pd.Timestamp('1987-06-01') # 1987 June
polarhole2_final_date = pd.Timestamp('2005-10-01') # 2005 Oct
polarhole3_final_date = pd.Timestamp('2015-12-01') # 2015 Dec

missing_dates = [pd.Timestamp('1986-4-1'), pd.Timestamp('1986-5-1'),
pd.Timestamp('1986-6-1'), pd.Timestamp('1987-12-1')]

###############################################################################
# Weights and biases config (https://docs.wandb.ai/guides/track/advanced/environment-variables)
###############################################################################

# Get API key from https://wandb.ai/authorize
WANDB_API_KEY = 'YOUR-KEY-HERE'
# Absolute path to store wandb generated files (folder must exist)
# Note: user must have write access
WANDB_DIR = '/path/to/wandb/dir'
# Absolute path to wandb config dir (
WANDB_CONFIG_DIR = '/path/to/wandb/config/dir'
WANDB_CACHE_DIR = '/path/to/wandb/cache/dir'

###############################################################################
# ECMWF details
###############################################################################

ECMWF_API_KEY = 'YOUR-KEY-HERE'
ECMWF_API_EMAIL = 'YOUR-KEY-HERE'
32 changes: 32 additions & 0 deletions tools/visualize-icenet-forecast/masks/.listing
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
drwxr-xr-x 2 ftp ftp 0 May 09 2017 .
drwxr-xr-x 2 ftp ftp 0 May 09 2017 ..
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901021200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901041200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901061200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901081200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901101200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901121200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901141200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901161200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901181200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901201200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901221200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901241200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901261200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901281200.nc
-rwxr-xr-x 1 ftp ftp 9856120 May 09 2017 ice_conc_nh_ease2-250_cdr-v2p0_197901301200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901021200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901041200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901061200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901081200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901101200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901121200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901141200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901161200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901181200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901201200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901221200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901241200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901261200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901281200.nc
-rwxr-xr-x 1 ftp ftp 9856141 Jun 08 2022 ice_conc_sh_ease2-250_cdr-v2p0_197901301200.nc
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
190 changes: 190 additions & 0 deletions tools/visualize-icenet-forecast/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,190 @@
"""
Code taken from https://github.com/tom-andersson/icenet-paper and slightly adjusted
to fit the galaxy interface.
"""
import sys
import os
import config
import numpy as np
import pandas as pd
import xarray as xr
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, BatchNormalization, UpSampling2D, \
concatenate, MaxPooling2D, Input
from tensorflow.keras.optimizers import Adam
sys.path.insert(0, os.path.join(os.getcwd(), 'icenet')) # if using jupyter kernel
'''
Defines the Python-based sea ice forecasting models, such as the IceNet architecture
and the linear trend extrapolation model.
'''

# Custom layers:
# --------------------------------------------------------------------


@tf.keras.utils.register_keras_serializable()
class TemperatureScale(tf.keras.layers.Layer):
    '''
    Implements the temperature scaling layer for probability calibration,
    as introduced in Guo 2017 (http://proceedings.mlr.press/v70/guo17a.html).

    The scalar temperature T is held in a non-trainable variable and `call`
    divides the incoming logits by it. T defaults to 1.0 (no scaling).
    '''
    def __init__(self, temp=1.0, **kwargs):
        # Forward **kwargs (e.g. `name`, `dtype`) to the base Layer rather
        # than silently dropping them.
        super(TemperatureScale, self).__init__(**kwargs)
        # Accept `temp` so that a network saved via get_config() reloads with
        # its calibrated temperature instead of resetting it to 1.0 (the
        # original __init__ discarded the saved value).
        self.temp = tf.Variable(initial_value=float(temp), trainable=False,
                                dtype=tf.float32, name='temp')

    def call(self, inputs):
        ''' Divide the input logits by the T value. '''
        return tf.divide(inputs, self.temp)

    def get_config(self):
        ''' For saving and loading networks with this custom layer. '''
        # Merge the base Layer config (name, dtype, ...) with our state.
        config = super(TemperatureScale, self).get_config()
        config.update({'temp': self.temp.numpy()})
        return config


# Network architectures:
# --------------------------------------------------------------------

def unet_batchnorm(input_shape, loss, weighted_metrics, learning_rate=1e-4, filter_size=3,
                   n_filters_factor=1, n_forecast_months=1, use_temp_scaling=False,
                   n_output_classes=3,
                   **kwargs):
    '''
    Build and compile the IceNet U-Net architecture with batch normalisation.

    Parameters:
        input_shape (tuple): Shape of the input tensor (excluding batch dim).
        loss: Keras loss to compile the model with.
        weighted_metrics: Metrics compiled with sample-weight support.
        learning_rate (float): Adam learning rate.
        filter_size (int): Convolution kernel size for the main conv layers.
        n_filters_factor (float): Multiplier applied to each stage's base
            filter count (64/128/256/512).
        n_forecast_months (int): Number of forecast leadtimes; one output
            head is stacked per leadtime.
        use_temp_scaling (bool): If True, apply a TemperatureScale layer to
            the logits before the softmax (probability calibration).
        n_output_classes (int): Number of SIC classes predicted per cell.

    Returns:
        Compiled tf.keras Model mapping inputs to per-class softmax
        probabilities of shape (..., n_output_classes, n_forecast_months).
    '''

    def nf(base):
        # Scale the stage's base filter count. Plain int() replaces np.int,
        # which was removed in NumPy 1.24 and crashed this function.
        return int(base * n_filters_factor)

    inputs = Input(shape=input_shape)

    # Encoder: two convs + batch norm per stage, downsampling via max-pooling.
    conv1 = Conv2D(nf(64), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(nf(64), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    bn1 = BatchNormalization(axis=-1)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(bn1)

    conv2 = Conv2D(nf(128), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(nf(128), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    bn2 = BatchNormalization(axis=-1)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(bn2)

    conv3 = Conv2D(nf(256), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(nf(256), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    bn3 = BatchNormalization(axis=-1)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(bn3)

    conv4 = Conv2D(nf(256), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(nf(256), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    bn4 = BatchNormalization(axis=-1)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(bn4)

    # Bottleneck.
    conv5 = Conv2D(nf(512), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(nf(512), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    bn5 = BatchNormalization(axis=-1)(conv5)

    # Decoder: upsample, concatenate with the matching encoder stage (skip
    # connections), then two convs + batch norm.
    up6 = Conv2D(nf(256), 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2), interpolation='nearest')(bn5))
    merge6 = concatenate([bn4, up6], axis=3)
    conv6 = Conv2D(nf(256), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(nf(256), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    bn6 = BatchNormalization(axis=-1)(conv6)

    up7 = Conv2D(nf(256), 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2), interpolation='nearest')(bn6))
    merge7 = concatenate([bn3, up7], axis=3)
    conv7 = Conv2D(nf(256), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(nf(256), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    bn7 = BatchNormalization(axis=-1)(conv7)

    up8 = Conv2D(nf(128), 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2), interpolation='nearest')(bn7))
    merge8 = concatenate([bn2, up8], axis=3)
    conv8 = Conv2D(nf(128), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(nf(128), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    bn8 = BatchNormalization(axis=-1)(conv8)

    up9 = Conv2D(nf(64), 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2), interpolation='nearest')(bn8))
    # NOTE: the final skip connection uses conv1 (pre-batch-norm), matching
    # the original icenet-paper architecture.
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(nf(64), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(nf(64), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(nf(64), filter_size, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)

    # One linear-activation output head per forecast month, stacked on the
    # last axis; softmax is taken over the class axis (-2).
    final_layer_logits = [Conv2D(n_output_classes, 1, activation='linear')(conv9)
                          for _ in range(n_forecast_months)]
    final_layer_logits = tf.stack(final_layer_logits, axis=-1)

    if use_temp_scaling:
        # Temperature scaling of the logits
        final_layer_logits_scaled = TemperatureScale()(final_layer_logits)
        final_layer = tf.nn.softmax(final_layer_logits_scaled, axis=-2)
    else:
        final_layer = tf.nn.softmax(final_layer_logits, axis=-2)

    model = Model(inputs, final_layer)

    # `learning_rate=` replaces the removed `lr=` alias of tf.keras Adam.
    model.compile(optimizer=Adam(learning_rate=learning_rate), loss=loss,
                  weighted_metrics=weighted_metrics)

    return model


# Benchmark models:
# --------------------------------------------------------------------


def linear_trend_forecast(forecast_month, n_linear_years='all', da=None, dataset='obs'):
    '''
    Returns a simple sea ice forecast based on a gridcell-wise linear extrapolation.

    Parameters:
        forecast_month (datetime.datetime): The month to forecast

        n_linear_years (int or str): Number of past years to use for linear trend
            extrapolation. If 'all' (the default), every complete year before
            `forecast_month` in the data is used. (The original code crashed on
            the 'all' default because it was passed straight to range().)

        da (xr.DataArray): xarray data array to use instead of observational
            data (used for setting up CMIP6 pre-training linear trend inputs
            in IceUNetDataPreProcessor).

        dataset (str): 'obs' or 'cmip6'. NOTE(review): this parameter is not
            currently read by the body — months listed in config.missing_dates
            are skipped regardless; confirm intended behaviour for 'cmip6'.

    Returns:
        output_map (np.ndarray): The output SIC map predicted
            by fitting a least squares linear trend to the past n_linear_years
            for the month being predicted.

        sie (float): The predicted sea ice extent (SIE) in km^2.
    '''

    if da is None:
        with xr.open_dataset('data/obs/siconca_EASE.nc') as ds:
            da = next(iter(ds.data_vars.values()))

    valid_dates = [pd.Timestamp(date) for date in da.time.values]

    if n_linear_years == 'all':
        # Use all complete years of data preceding the forecast month; dates
        # outside the data range are dropped by the valid_dates filter below.
        n_linear_years = forecast_month.year - min(valid_dates).year

    # Same calendar month in each of the previous n_linear_years years.
    input_dates = [forecast_month - pd.DateOffset(years=1 + lag)
                   for lag in range(n_linear_years)]

    # Do not use missing months in the linear trend projection
    input_dates = [date for date in input_dates if date not in config.missing_dates]

    # Chop off input dates from before the data start
    input_dates = sorted(date for date in input_dates if date in valid_dates)

    # The actual number of past years used
    actual_n_linear_years = len(input_dates)

    da = da.sel(time=input_dates)

    input_maps = np.array(da.data)

    # One least-squares fit per grid cell: columns of y are cell time series.
    x = np.arange(actual_n_linear_years)
    y = input_maps.reshape(actual_n_linear_years, -1)

    # Fit the least squares linear coefficients (y = mx + c)
    r = np.linalg.lstsq(np.c_[x, np.ones_like(x)], y, rcond=None)[0]

    # Extrapolate one step beyond the last input year onto the EASE grid.
    output_map = np.matmul(np.array([actual_n_linear_years, 1]), r).reshape(432, 432)

    # Zero out land cells.
    land_mask_path = os.path.join(config.mask_data_folder, config.land_mask_filename)
    land_mask = np.load(land_mask_path)
    output_map[land_mask] = 0.

    # Clip the extrapolation to the valid SIC range [0, 1].
    output_map[output_map < 0] = 0.
    output_map[output_map > 1] = 1.

    # SIE: number of cells above 15% concentration times cell area (25 km)^2.
    sie = np.sum(output_map > 0.15) * 25**2

    return output_map, sie
Loading