This repository was archived by the owner on Sep 11, 2023. It is now read-only.

Commit 41514f5

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent f761ddb commit 41514f5


47 files changed, +15 -108 lines
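
Nearly all of the 108 deleted lines follow one pattern: a blank line sitting directly after a block opener (a def, class, for, if/else, or with line) is removed, and a handful of over-long lines are wrapped. This is consistent with the black formatter's blank-line and line-length rules, but the repository's hook configuration is not shown in this commit, so the attribution is an assumption. A minimal runnable sketch of the dominant fix:

# A sketch of the autofix pattern (assumed to come from black; the hook
# list itself is not visible in this commit).
satellite_data = []
channel_indexes = [1, 8, 9]

# Before the fix, the source had a blank line directly after the block
# opener:
#
#     for channel_index in channel_indexes:
#
#         satellite_data.append(channel_index)
#
# After the fix, the blank line is gone:
for channel_index in channel_indexes:
    satellite_data.append(channel_index)

print(satellite_data)  # prints [1, 8, 9]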

notebooks/2021-08/2021-08-25/video.py (-1)

@@ -68,7 +68,6 @@
 channel_indexes = [1, 8, 9]
 satellite_data = []
 for channel_index in channel_indexes:
-
     # renormalize
     satellite_data.append(
         data["sat_data"][batch_index, :, :, :, channel_index] * SAT_STD.values[channel_index]

notebooks/2021-08/2021-08-26/video.py (-1)

@@ -88,7 +88,6 @@
 channel_indexes = [1, 9, 8]
 satellite_data = []
 for channel_index in channel_indexes:
-
     # renormalize
     satellite_data.append(
         data["sat_data"][batch_index, :, :, :, channel_index] * SAT_STD.values[channel_index]

notebooks/2021-09/2021-09-13/remove_hash.py (-1)

@@ -20,7 +20,6 @@
 
 for filenames in [train_filenames, validation_filenames]:
     for file in train_filenames:
-
         print(file)
 
         filename = file.split("/")[-1]

notebooks/2021-09/2021-09-14/gsp_centroid.py (-1)

@@ -15,7 +15,6 @@
 
 # for index in range(0, len(shape_data_raw)):
 for index in range(140, 150):
-
     # just select the first one
     shape_data = shape_data_raw.iloc[index : index + 1]
     shapes_dict = json.loads(shape_data["geometry"].to_json())

notebooks/2021-09/2021-09-14/gsp_duplicated.py (-1)

@@ -12,7 +12,6 @@
 duplicated_raw["Amount"] = range(0, len(duplicated_raw))
 
 for i in range(0, 8, 2):
-
     # just select the first one
     duplicated = duplicated_raw.iloc[i : i + 2]
     shapes_dict = json.loads(duplicated["geometry"].to_json())

notebooks/2021-09/2021-09-29/gsp_duplicated.py (-1)

@@ -14,7 +14,6 @@
 duplicated_raw["Amount"] = range(0, len(duplicated_raw))
 
 for i in range(0, 8, 2):
-
     # just select the first one
     duplicated = duplicated_raw.iloc[i : i + 2]
     shapes_dict = json.loads(duplicated["geometry"].to_json())

notebooks/2021-09/2021-09-29/video.py (-2)

@@ -41,7 +41,6 @@
 
 
 def get_trace(dt):
-
     # plot to check it looks right
     return go.Choroplethmapbox(
         geojson=shapes_dict,
@@ -54,7 +53,6 @@ def get_trace(dt):
 
 
 def get_frame(dt):
-
     # plot to check it looks right
     return go.Choroplethmapbox(
         z=gps_data[dt],

notebooks/2021-10/2021-10-01/pydantic.py (-2)

@@ -11,7 +11,6 @@
 
 
 class Satellite(BaseModel):
-
     # width: int = Field(..., g=0, description="The width of the satellite image")
     # height: int = Field(..., g=0, description="The width of the satellite image")
     # num_channels: int = Field(..., g=0, description="The width of the satellite image")
@@ -49,7 +48,6 @@ class Config:
 
 
 class Batch(BaseModel):
-
     batch_size: int = Field(
         ...,
         g=0,

notebooks/2021-10/2021-10-08/xr_compression.py (-1)

@@ -9,7 +9,6 @@
 def get_satellite_xrarray_data_array(
     batch_size, seq_length_5, satellite_image_size_pixels, number_sat_channels=10
 ):
-
     r = np.random.randn(
         # self.batch_size,
         seq_length_5,

notebooks/2021-10/2021-10-08/xr_pydantic.py (-1)

@@ -25,7 +25,6 @@ def v_image_data(cls, v):
 
 
 class Batch(BaseModel):
-
     batch_size: int = 0
     satellite: Satellite
 

nowcasting_dataset/data_sources/data_source.py (-1)

@@ -82,7 +82,6 @@ def __post_init__(self):
     def _get_start_dt(
         self, t0_datetime_utc: Union[pd.Timestamp, pd.DatetimeIndex]
     ) -> Union[pd.Timestamp, pd.DatetimeIndex]:
-
         return t0_datetime_utc - self.history_duration
 
     def _get_end_dt(

nowcasting_dataset/data_sources/fake/batch.py (-1)

@@ -504,7 +504,6 @@ def topographic_fake(
     # make batch of arrays
     xr_arrays = []
     for i in range(batch_size):
-
         x, y = make_image_coords_osgb(
             size_x=image_size_pixels_width,
             size_y=image_size_pixels_height,

nowcasting_dataset/data_sources/gsp/eso.py (-1)

@@ -164,7 +164,6 @@ def get_gsp_shape_from_eso(
     shape_gpd["RegionID"] = range(1, len(shape_gpd) + 1)
 
     if save_local_file:
-
         # rename the columns to less than 10 characters
         shape_gpd_to_save = shape_gpd.copy()
         shape_gpd_to_save.rename(columns=rename_save_columns, inplace=True)

nowcasting_dataset/data_sources/gsp/gsp_data_source.py (-5)

@@ -173,7 +173,6 @@ def get_all_locations(self, t0_datetimes_utc: pd.DatetimeIndex) -> List[SpaceTimeLocation]:
         if total_gsp_nan_count > 0:
             assert Exception("There are nans in the GSP data. Can't get locations for all GSPs")
         else:
-
             t0_datetimes_utc.name = "t0_datetime_utc"
 
             # get all locations
@@ -236,7 +235,6 @@ def get_locations(self, t0_datetimes_utc: pd.DatetimeIndex) -> List[SpaceTimeLocation]:
 
         total_gsp_nan_count = self.gsp_power.isna().sum().sum()
         if total_gsp_nan_count == 0:
-
             # get random GSP metadata
             indexes = sorted(
                 list(self.rng.integers(low=0, high=len(self.metadata), size=len(t0_datetimes_utc)))
@@ -249,7 +247,6 @@ def get_locations(self, t0_datetimes_utc: pd.DatetimeIndex) -> List[SpaceTimeLocation]:
             ids = list(metadata.index)
 
         else:
-
             logger.warning(
                 "There are some nans in the gsp data, "
                 "so to get x,y locations we have to do a big loop"
@@ -262,7 +259,6 @@ def get_locations(self, t0_datetimes_utc: pd.DatetimeIndex) -> List[SpaceTimeLocation]:
             ids = []
 
             for t0_dt in t0_datetimes_utc:
-
                 # Choose start and end times
                 start_dt = self._get_start_dt(t0_dt)
                 end_dt = self._get_end_dt(t0_dt)
@@ -290,7 +286,6 @@ def get_locations(self, t0_datetimes_utc: pd.DatetimeIndex) -> List[SpaceTimeLocation]:
 
         locations = []
         for i in range(len(x_centers_osgb)):
-
             locations.append(
                 SpaceTimeLocation(
                     t0_datetime_utc=t0_datetimes_utc[i],
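
The nan check gating the branches above is a two-level reduction: DataFrame.isna() yields a boolean frame, and the two chained sums collapse columns and then rows into one count. A tiny self-contained illustration with toy data (not the real GSP feed):

import numpy as np
import pandas as pd

# Toy GSP power table with one missing value; the real self.gsp_power
# frame has datetimes as the index and GSP ids as columns.
gsp_power = pd.DataFrame({"gsp_1": [1.0, 2.0], "gsp_2": [3.0, np.nan]})

total_gsp_nan_count = gsp_power.isna().sum().sum()
print(total_gsp_nan_count)  # prints 1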

nowcasting_dataset/data_sources/gsp/pvlive.py (-1)

@@ -89,7 +89,6 @@ def load_pv_gsp_raw_data_from_pvlive(
     future_tasks = []
     with futures.ThreadPoolExecutor(max_workers=1) as executor:
         for gsp_id in gsp_ids:
-
             # set the first chunk start and end times
             start_chunk = first_start_chunk
             end_chunk = first_end_chunk
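
For context, the loop above fans the downloads out over a thread pool, one future per GSP, and max_workers=1 effectively serialises the PV Live requests. A minimal sketch of that submit-and-collect pattern; fetch_one_gsp is a hypothetical stand-in for the real chunked download:

from concurrent import futures

def fetch_one_gsp(gsp_id, start_chunk, end_chunk):
    # Hypothetical stand-in for the real per-GSP chunked PV Live download.
    return {"gsp_id": gsp_id, "start": start_chunk, "end": end_chunk}

gsp_ids = [1, 2, 3]
first_start_chunk, first_end_chunk = "2021-01-01", "2021-01-31"

future_tasks = []
with futures.ThreadPoolExecutor(max_workers=1) as executor:
    for gsp_id in gsp_ids:
        # set the first chunk start and end times, as in the hunk above
        start_chunk = first_start_chunk
        end_chunk = first_end_chunk
        future_tasks.append(executor.submit(fetch_one_gsp, gsp_id, start_chunk, end_chunk))

results = [task.result() for task in future_tasks]
print(results)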

nowcasting_dataset/data_sources/metadata/metadata_model.py (-1)

@@ -114,7 +114,6 @@ def save_to_csv(self, path):
             metadata_df = pd.DataFrame(metadata_dict)
 
         else:
-
             metadata_df = pd.read_csv(filename)
 
             metadata_df_extra = pd.DataFrame(metadata_dict)

nowcasting_dataset/data_sources/pv/live.py (-2)

@@ -43,7 +43,6 @@ def get_metadata_from_database(providers: List[str] = None) -> pd.DataFrame:
 
     pv_system_all_df = []
     for provider in providers:
-
         logger.debug(f"Get PV systems from database for {provider}")
 
         with db_connection.get_session() as session:
@@ -136,7 +135,6 @@ def get_pv_power_from_database(
     logger.debug(f"Found {len(pv_yields_df)} pv yields")
 
     if len(pv_yields_df) == 0:
-
         data = create_empty_pv_data(end_utc=now, providers=providers, start_utc=start_utc)
 
         return data

nowcasting_dataset/data_sources/pv/pv_data_source.py (+1, -2)

@@ -98,7 +98,6 @@ def get_data_model_for_batch():
         return PV
 
     def _load_metadata(self):
-
         logger.debug(f"Loading PV metadata from {self.files_groups}")
 
         # collect all metadata together
@@ -155,7 +154,6 @@ def _load_metadata(self):
         logger.debug(f"Found {len(pv_metadata)} pv systems")
 
     def _load_pv_power(self):
-
         logger.debug(f"Loading PV Power data from {self.files_groups}")
 
         if not self.is_live:
@@ -453,6 +451,7 @@ def get_locations(self, t0_datetimes_utc: pd.DatetimeIndex) -> List[SpaceTimeLocation]:
         Returns: x_locations, y_locations. Each has one entry per t0_datetime.
         Locations are in OSGB coordinates.
         """
+
         # Set this up as a separate function, so we can cache the result!
         @functools.cache  # functools.cache requires Python >= 3.9
         def _get_pv_system_ids(t0_datetime: pd.Timestamp) -> pd.Int64Dtype:

nowcasting_dataset/data_sources/sun/raw_data_load_save.py (-3)

@@ -49,16 +49,13 @@ def get_azimuth_and_elevation(
     names = []
     # loop over locations and find azimuth and elevation angles,
     with futures.ThreadPoolExecutor() as executor:
-
         logger.debug("Setting up jobs")
 
         # Submit tasks to the executor.
         future_azimuth_and_elevation_per_location = []
         for i in tqdm(range(len(x_centers))):
-
             name = x_y_to_name(x_centers[i], y_centers[i])
             if name not in names:
-
                 lat, lon = geospatial.osgb_to_lat_lon(x=x_centers[i], y=y_centers[i])
 
                 future_azimuth_and_elevation = executor.submit(

nowcasting_dataset/data_sources/sun/sun_data_source.py (-3)

@@ -69,7 +69,6 @@ def get_example(self, location: SpaceTimeLocation) -> xr.Dataset:
         end_dt = self._get_end_dt(t0_datetime_utc)
 
         if not self.load_live:
-
             # The names of the columns get truncated when saving, therefore we need to look for the
             # name of the columns near the location we are looking for
             locations = np.array(
@@ -96,7 +95,6 @@ def get_example(self, location: SpaceTimeLocation) -> xr.Dataset:
             elevation = self.elevation.loc[start_dt:end_dt][name]
 
         else:
-
             latitude, longitude = osgb_to_lat_lon(x=x_center_osgb, y=y_center_osgb)
 
             datestamps = pd.date_range(start=start_dt, end=end_dt, freq="5T").tolist()
@@ -115,7 +113,6 @@ def get_example(self, location: SpaceTimeLocation) -> xr.Dataset:
         return sun
 
     def _load(self):
-
         logger.info(f"Loading Sun data from {self.zarr_path}")
 
         if not self.load_live:

nowcasting_dataset/dataset/batch.py (-2)

@@ -137,7 +137,6 @@ def load_netcdf(
 
     # loop over data sources
     for data_source_name in data_sources_names:
-
         local_netcdf_filename = os.path.join(
             local_netcdf_path, data_source_name, get_netcdf_filename(batch_idx)
         )
@@ -193,7 +192,6 @@ def load_netcdf(
 
     # legacy NWP
     if "nwp" in batch_dict.keys():
-
         nwp_rename_dict = {
             "x_index": "x_osgb_index",
             "y_index": "y_osgb_index",

nowcasting_dataset/dataset/split/method.py (-2)

@@ -85,7 +85,6 @@ def split_method(
         test_periods = unique_periods[unique_periods["modulo"].isin(test_indexes)]["period"]
 
     elif method == "random":
-
         # randomly sort indexes
         rng = np.random.default_rng(seed)
         unique_periods_in_dataset = rng.permutation(unique_periods_in_dataset)
@@ -108,7 +107,6 @@ def split_method(
         test_periods = pd.to_datetime(unique_periods_in_dataset[validation_test_split:])
 
     elif method == "specific":
-
         train_periods = unique_periods_in_dataset[
             unique_periods_in_dataset.isin(train_test_validation_specific.train)
         ]
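
The "random" branch shown above shuffles the unique periods with a seeded generator and then slices the shuffled array into train/validation/test. A self-contained sketch of the mechanism; the 50/25/25 cut points are illustrative, since the real split fractions come from the function's configuration:

import numpy as np
import pandas as pd

# Toy month-level periods standing in for unique_periods_in_dataset.
unique_periods_in_dataset = pd.to_datetime(
    ["2021-01-01", "2021-02-01", "2021-03-01", "2021-04-01"]
)

# randomly sort indexes, as in the diff above
rng = np.random.default_rng(seed=42)
unique_periods_in_dataset = rng.permutation(unique_periods_in_dataset)

# Illustrative split points: 50% train, 25% validation, 25% test.
n = len(unique_periods_in_dataset)
train_validation_split, validation_test_split = n // 2, 3 * n // 4

train_periods = pd.to_datetime(unique_periods_in_dataset[:train_validation_split])
validation_periods = pd.to_datetime(
    unique_periods_in_dataset[train_validation_split:validation_test_split]
)
test_periods = pd.to_datetime(unique_periods_in_dataset[validation_test_split:])
print(train_periods, validation_periods, test_periods)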

nowcasting_dataset/filesystem/utils.py (-1)

@@ -90,7 +90,6 @@ def delete_all_files_in_temp_path(path: Union[Path, str], delete_dirs: bool = False):
     else:
         # loop over folder structure, but only delete files
         for root, dirs, files in filesystem.walk(path):
-
             for f in files:
                 filesystem.rm(f"{root}/{f}")
 

nowcasting_dataset/manager/manager.py (-3)

@@ -273,15 +273,13 @@ def sample_spatial_and_temporal_locations_for_examples(
         shuffled_t0_datetimes = pd.DatetimeIndex(shuffled_t0_datetimes)
 
         if get_all_locations:
-
             # note that the returned 'shuffled_t0_datetimes'
             # has duplicate datetimes for each location
             locations = self.data_source_which_defines_geospatial_locations.get_all_locations(
                 t0_datetimes_utc=shuffled_t0_datetimes
             )
 
         else:
-
             locations = self.data_source_which_defines_geospatial_locations.get_locations(
                 shuffled_t0_datetimes
             )
@@ -404,7 +402,6 @@ def create_batches(self, overwrite_batches: bool) -> None:
             for worker_id, (data_source_name, data_source) in enumerate(
                 self.data_sources.items()
             ):
-
                 # Get indexes of first batch and example. And subset locations_for_split.
                 idx_of_first_batch = first_batches_to_create[split_name][data_source_name]
                 idx_of_first_example = idx_of_first_batch * self.config.process.batch_size

nowcasting_dataset/manager/manager_live.py (-2)

@@ -186,7 +186,6 @@ def create_batches(self, use_async: Optional[bool] = True) -> None:
         async_results_from_create_batches = []
         an_error_has_occured = multiprocessing.Event()
         for worker_id, (data_source_name, data_source) in enumerate(self.data_sources.items()):
-
             # Get indexes of first batch and example. And subset locations_for_split.
             idx_of_first_batch = 0
             locations = locations_for_each_example
@@ -226,7 +225,6 @@ def create_batches(self, use_async: Optional[bool] = True) -> None:
             # Sometimes when debuggin it is easy to use non async
             data_source.create_batches(**kwargs_for_create_batches)
         else:
-
             async_result = pool.apply_async(
                 data_source.create_batches,
                 kwds=kwargs_for_create_batches,

nowcasting_dataset/utils.py (+2)

@@ -180,6 +180,7 @@ def shutdown(self, wait=True):
 
 def arg_logger(func):
     """A function decorator to log all the args and kwargs passed into a function."""
+
     # Adapted from https://stackoverflow.com/a/23983263/732596
     @wraps(func)
     def inner_func(*args, **kwargs):
@@ -191,6 +192,7 @@ def inner_func(*args, **kwargs):
 
 def exception_logger(func):
     """A function decorator to log exceptions thrown by the inner function."""
+
     # Adapted from
     # www.blog.pythonlibrary.org/2016/06/09/python-how-to-create-an-exception-logging-decorator
     @wraps(func)
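
Both decorators touched here only gain a blank line after their docstrings. For readers unfamiliar with the pattern, they wrap the target function with functools.wraps so the wrapper keeps the original name and docstring. A minimal sketch of the arg-logging variant; everything beyond the lines visible in the diff is an assumption:

import logging
from functools import wraps

logger = logging.getLogger(__name__)


def arg_logger(func):
    """A function decorator to log all the args and kwargs passed into a function."""

    # Adapted from https://stackoverflow.com/a/23983263/732596
    @wraps(func)
    def inner_func(*args, **kwargs):
        # Assumed body: log the call, then delegate to the wrapped function.
        logger.debug(f"Calling {func.__name__} with args={args}, kwargs={kwargs}")
        return func(*args, **kwargs)

    return inner_func


@arg_logger
def add(a, b):
    return a + b

print(add(1, 2))  # logs the call at DEBUG level, then prints 3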

scripts/generate_raw_data/get_raw_pv_gsp_data.py (+12, -8)

@@ -48,21 +48,24 @@ def fetch_data():
     data_df = load_pv_gsp_raw_data_from_pvlive(start=start, end=end, normalize_data=False)
 
     # pivot to index as datetime_gmt, and columns as gsp_id
-    data_generation_df = data_df.pivot(index="datetime_gmt", columns="gsp_id", values="generation_mw")
-    data_installedcapacity_df = data_df.pivot(index="datetime_gmt", columns="gsp_id", values="installedcapacity_mwp")
+    data_generation_df = data_df.pivot(
+        index="datetime_gmt", columns="gsp_id", values="generation_mw"
+    )
+    data_installedcapacity_df = data_df.pivot(
+        index="datetime_gmt", columns="gsp_id", values="installedcapacity_mwp"
+    )
     data_capacity_df = data_df.pivot(index="datetime_gmt", columns="gsp_id", values="capacity_mwp")
-    data_updated_gmt_df = data_df.pivot(index="datetime_gmt", columns="gsp_id", values="updated_gmt")
+    data_updated_gmt_df = data_df.pivot(
+        index="datetime_gmt", columns="gsp_id", values="updated_gmt"
+    )
     data_xarray = xr.Dataset(
         data_vars={
             "generation_mw": (("datetime_gmt", "gsp_id"), data_generation_df),
             "installedcapacity_mwp": (("datetime_gmt", "gsp_id"), data_installedcapacity_df),
             "capacity_mwp": (("datetime_gmt", "gsp_id"), data_capacity_df),
             "updated_gmt": (("datetime_gmt", "gsp_id"), data_updated_gmt_df),
         },
-        coords={
-            "datetime_gmt": data_generation_df.index,
-            "gsp_id": data_generation_df.columns
-        },
+        coords={"datetime_gmt": data_generation_df.index, "gsp_id": data_generation_df.columns},
     )
 
 
@@ -71,7 +74,8 @@ def fetch_data():
 
     # Make encoding
     encoding = {
-        var: {"compressor": numcodecs.Blosc(cname="zstd", clevel=5)} for var in data_xarray.data_vars
+        var: {"compressor": numcodecs.Blosc(cname="zstd", clevel=5)}
+        for var in data_xarray.data_vars
     }
 
     # save data to file
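
The reformatted block follows a common pandas-to-xarray pattern: pivot the long table once per variable, then hand each wide frame to xr.Dataset with shared coordinates. A runnable sketch with toy data; the real script pulls the long table from PV Live via load_pv_gsp_raw_data_from_pvlive:

import numpy as np
import pandas as pd
import xarray as xr

# Toy long-format table: one row per (timestamp, GSP) pair.
data_df = pd.DataFrame(
    {
        "datetime_gmt": pd.date_range("2021-01-01", periods=4, freq="30T").repeat(2),
        "gsp_id": [1, 2] * 4,
        "generation_mw": np.random.rand(8) * 100,
    }
)

# pivot to index as datetime_gmt, and columns as gsp_id
data_generation_df = data_df.pivot(
    index="datetime_gmt", columns="gsp_id", values="generation_mw"
)

# Pack the 2-D frame into an xarray Dataset with named dimensions.
data_xarray = xr.Dataset(
    data_vars={"generation_mw": (("datetime_gmt", "gsp_id"), data_generation_df)},
    coords={"datetime_gmt": data_generation_df.index, "gsp_id": data_generation_df.columns},
)
print(data_xarray)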
