@@ -215,7 +215,7 @@ def table_interp_multiple_batches(
 ) -> Tensor:
     """Table interpolation with for loop over batch dimension."""
     kdat = []
-    for (it_image, it_omega) in zip(image, omega):
+    for it_image, it_omega in zip(image, omega):
         kdat.append(
             table_interp_one_batch(
                 it_image.unsqueeze(0),
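Note: the change in this hunk, and in every hunk below, is purely cosmetic. Python unpacks loop targets from zip identically with or without the surrounding parentheses, as in this standalone sketch (variable names invented for illustration):

images = ["img0", "img1"]
omegas = ["om0", "om1"]

with_parens = [(im, om) for (im, om) in zip(images, omegas)]
without_parens = [(im, om) for im, om in zip(images, omegas)]

assert with_parens == without_parens  # identical unpacking either way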
@@ -245,7 +245,7 @@ def table_interp_fork_over_batchdim(
     """Table interpolation with forking over k-space."""
     # initialize the fork processes
     futures: List[torch.jit.Future[torch.Tensor]] = []
-    for (image_chunk, omega_chunk) in zip(
+    for image_chunk, omega_chunk in zip(
         image.tensor_split(num_forks), omega.tensor_split(num_forks)
     ):
         futures.append(
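For context, table_interp_fork_over_batchdim splits its inputs into num_forks chunks with Tensor.tensor_split and launches one torch.jit.fork task per chunk. A minimal fork/wait sketch of that pattern, with a made-up work() function standing in for the per-chunk interpolation:

from typing import List

import torch
from torch import Tensor


def work(chunk: Tensor) -> Tensor:
    # stand-in for the per-chunk interpolation call
    return chunk * 2


def forked_map(x: Tensor, num_forks: int) -> Tensor:
    # launch one asynchronous task per chunk
    futures: List[torch.jit.Future[torch.Tensor]] = []
    for chunk in x.tensor_split(num_forks):
        futures.append(torch.jit.fork(work, chunk))

    # wait for every task and reassemble along the split dimension
    return torch.cat([torch.jit.wait(future) for future in futures])


print(forked_map(torch.arange(8.0), num_forks=3))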
@@ -409,11 +409,11 @@ def accum_tensor_index_add(
 ) -> Tensor:
     """We fork this function for the adjoint accumulation."""
     if batched_nufft:
-        for (image_batch, arr_ind_batch, data_batch) in zip(image, arr_ind, data):
-            for (image_coil, data_coil) in zip(image_batch, data_batch):
+        for image_batch, arr_ind_batch, data_batch in zip(image, arr_ind, data):
+            for image_coil, data_coil in zip(image_batch, data_batch):
                 image_coil.index_add_(0, arr_ind_batch, data_coil)
     else:
-        for (image_it, data_it) in zip(image, data):
+        for image_it, data_it in zip(image, data):
             image_it.index_add_(0, arr_ind, data_it)

     return image
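accum_tensor_index_add does the adjoint scatter with the in-place Tensor.index_add_, which adds source rows into the positions selected by the index and accumulates when an index repeats. A toy example of that behavior (tensors invented for illustration):

import torch

image = torch.zeros(4)
arr_ind = torch.tensor([0, 2, 2])   # index 2 appears twice
data = torch.tensor([1.0, 10.0, 100.0])

image.index_add_(0, arr_ind, data)  # in-place scatter-add along dim 0
print(image)                        # tensor([  1.,   0., 110.,   0.])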
@@ -427,7 +427,7 @@ def fork_and_accum(
     # initialize the fork processes
     futures: List[torch.jit.Future[torch.Tensor]] = []
     if batched_nufft:
-        for (image_chunk, arr_ind_chunk, data_chunk) in zip(
+        for image_chunk, arr_ind_chunk, data_chunk in zip(
             image.tensor_split(num_forks),
             arr_ind.tensor_split(num_forks),
             data.tensor_split(num_forks),
@@ -442,7 +442,7 @@ def fork_and_accum(
                 )
             )
     else:
-        for (image_chunk, data_chunk) in zip(
+        for image_chunk, data_chunk in zip(
             image.tensor_split(num_forks), data.tensor_split(num_forks)
         ):
             futures.append(
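fork_and_accum can fork the in-place accumulation because tensor_split returns views of image, so the index_add_ calls inside each forked task write straight into the original tensor; the futures only need to be waited on, not gathered. A hedged sketch of that design (shapes and the per-chunk work are invented):

from typing import List

import torch
from torch import Tensor


def accum_chunk(image_chunk: Tensor, data_chunk: Tensor) -> Tensor:
    # in-place accumulation; image_chunk is a view into the full image
    for image_it, data_it in zip(image_chunk, data_chunk):
        image_it.add_(data_it)
    return image_chunk


def fork_and_accum_sketch(image: Tensor, data: Tensor, num_forks: int) -> None:
    futures: List[torch.jit.Future[torch.Tensor]] = []
    for image_chunk, data_chunk in zip(
        image.tensor_split(num_forks), data.tensor_split(num_forks)
    ):
        futures.append(torch.jit.fork(accum_chunk, image_chunk, data_chunk))

    # wait for completion only; the chunk views already updated image in place
    _ = [torch.jit.wait(future) for future in futures]


image = torch.zeros(4, 3)
fork_and_accum_sketch(image, torch.ones(4, 3), num_forks=2)
print(image)  # all ones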
@@ -476,7 +476,7 @@ def calc_coef_and_indices_batch(
     """For loop coef calculation over batch dim."""
     coef = []
     arr_ind = []
-    for (tm_it, base_offset_it) in zip(tm, base_offset):
+    for tm_it, base_offset_it in zip(tm, base_offset):
         coef_it, arr_ind_it = calc_coef_and_indices(
             tm_it,
             base_offset_it,
@@ -511,7 +511,7 @@ def calc_coef_and_indices_fork_over_batches(
     if batched_nufft:
         # initialize the fork processes
         futures: List[torch.jit.Future[Tuple[Tensor, Tensor]]] = []
-        for (tm_chunk, base_offset_chunk) in zip(
+        for tm_chunk, base_offset_chunk in zip(
             tm.tensor_split(num_forks),
             base_offset.tensor_split(num_forks),
         ):
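calc_coef_and_indices_fork_over_batches forks a function that returns a pair of tensors; the explicit List[torch.jit.Future[Tuple[Tensor, Tensor]]] annotation is the usual way to spell out the element type of an initially empty list, which keeps the code TorchScript-friendly. A small sketch of forking a tuple-returning function and collecting both outputs (the per-chunk computation is a placeholder, not the repository's calc_coef_and_indices_batch):

from typing import List, Tuple

import torch
from torch import Tensor


def coef_and_indices(chunk: Tensor) -> Tuple[Tensor, Tensor]:
    # placeholder for the per-chunk coefficient/index computation
    return chunk * 0.5, chunk.to(torch.long)


def forked_coef(tm: Tensor, num_forks: int) -> Tuple[Tensor, Tensor]:
    # annotate the empty list so its element type is explicit
    futures: List[torch.jit.Future[Tuple[Tensor, Tensor]]] = []
    for tm_chunk in tm.tensor_split(num_forks):
        futures.append(torch.jit.fork(coef_and_indices, tm_chunk))

    results = [torch.jit.wait(future) for future in futures]
    coef = torch.cat([result[0] for result in results])
    arr_ind = torch.cat([result[1] for result in results])
    return coef, arr_ind


print(forked_coef(torch.arange(6.0), num_forks=2))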
@@ -570,7 +570,7 @@ def sort_data(
     if batched_nufft:
         # loop over batch dimension to get sorted k-space
         results: List[Tuple[Tensor, Tensor, Tensor]] = []
-        for (tm_it, omega_it, data_it) in zip(tm, omega, data):
+        for tm_it, omega_it, data_it in zip(tm, omega, data):
             results.append(
                 sort_one_batch(tm_it, omega_it, data_it.unsqueeze(0), grid_size)
             )
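sort_data loops over the batch dimension and defers the actual reordering of each trajectory to sort_one_batch. As a generic illustration only (this is not the repository's sort_one_batch, and the sort key here is invented), reordering k-space samples by a per-sample key with torch.sort looks like this:

import torch

omega = torch.rand(2, 10)              # (ndim, klength) k-space trajectory
data = torch.randn(1, 3, 10)           # (batch, coil, klength) samples
key = (omega[0] * 100).to(torch.long)  # hypothetical 1-D sort key per sample

_, indices = torch.sort(key)
omega_sorted = omega[:, indices]       # same permutation applied to trajectory...
data_sorted = data[:, :, indices]      # ...and to the matching data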