Commit 399eabb

Update packages to latest versions (#101)
* Update packages to latest versions
* Run black (macos)
* Fix other black files
* Update readthedocs yaml
* Move conf.py
* Fix conf.py conf
* Resolve deprecation warning
1 parent 7e23776 commit 399eabb

12 files changed (+68, -62 lines)

.github/workflows/python-app.yml (+9, -19)

@@ -17,21 +17,21 @@ jobs:
       max-parallel: 4
       matrix:
         platform: [ubuntu-latest]
-        python-version: ["3.8", "3.10"]
+        python-version: ["3.10", "3.11"]
 
     runs-on: ${{ matrix.platform }}
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install
         run: |
           python -m pip install --upgrade pip
-          pip install --upgrade wheel build setuptools
-          python -m build .
+          pip install --upgrade wheel
+          python setup.py bdist_wheel
           pip install dist/*.whl
       - name: Test Import
         run: |

@@ -43,27 +43,17 @@ jobs:
       max-parallel: 4
       matrix:
         platform: [ubuntu-latest]
-        python-version: ["3.8", "3.10"]
+        python-version: ["3.10", "3.11"]
 
     runs-on: ${{ matrix.platform }}
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Get pip cache dir
-        id: pip-cache
-        run: |
-          echo "::set-output name=dir::$(pip cache dir)"
-      - name: pip cache
-        uses: actions/cache@v3
-        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
-          key: ${{ runner.os }}-pip-py${{ matrix.python-version }}-${{ hashFiles('**/setup.cfg') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-py${{ matrix.python-version }}-
+          cache: 'pip'
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip

.readthedocs.yml (+17, -2)

@@ -1,8 +1,23 @@
+# Read the Docs configuration file for Sphinx projects
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the OS, Python version and other tools you might need
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+
 python:
-  version: 3.8
   install:
+    - requirements: docs/requirements.txt
     - method: pip
       path: .
       extra_requirements:
         - docs
-  system_packages: true
+
+sphinx:
+  builder: html
+  configuration: docs/source/conf.py

docs/requirements.txt (-4)

@@ -1,5 +1 @@
-sphinx>=3.2.0
-sphinx-rtd-theme>=0.5.1
-sphinxcontrib-katex
-sphinx-autodoc-typehints
 torch --extra-index-url https://download.pytorch.org/whl/cpu

setup.cfg (+18, -13)

@@ -43,24 +43,29 @@ ignore_missing_imports=True
 install_requires =
     numpy>=1.22.4
     scipy>=1.8.1
-    torch>=1.12
+    torch>=2.0
 packages = find:
-python_requires = >=3.8
+python_requires = >=3.10
 
 [options.extras_require]
 dev =
-    black==22.10.0
-    flake8==5.0.4
-    mypy==0.991
-    pytest==7.2.0
+    black==24.10.0
+    flake8==6.1.0
+    mypy==1.13.0
+    pytest==8.3.3
 tests =
-    black==22.10.0
-    flake8==5.0.4
-    mypy==0.991
-    numpy==1.23.5
-    pytest==7.2.0
-    scipy==1.9.3
-    torch==1.13.0
+    black==24.10.0
+    flake8==6.1.0
+    mypy==1.13.0
+    numpy==2.1.3
+    pytest==8.3.3
+    scipy==1.14.1
+    torch==2.5.1
+docs =
+    sphinx>=3.2.0
+    sphinx-rtd-theme>=0.5.1
+    sphinxcontrib-katex
+    sphinx-autodoc-typehints
 
 [options.packages.find]
 exclude =

tests/data/create_old_data.py (+3, -3)

@@ -20,7 +20,7 @@ def create_interp_data():
     ]
 
     outputs = []
-    for (shape, klength, is_complex) in test_params:
+    for shape, klength, is_complex in test_params:
         torch.manual_seed(123)
         im_size = shape[2:-1]
 

@@ -74,7 +74,7 @@ def create_nufft_data():
     ]
 
     outputs = []
-    for (shape, klength, is_complex) in test_params:
+    for shape, klength, is_complex in test_params:
         torch.manual_seed(123)
         im_size = shape[2:-1]
 

@@ -128,7 +128,7 @@ def create_sense_nufft_data():
     ]
 
     outputs = []
-    for (shape, klength, is_complex) in test_params:
+    for shape, klength, is_complex in test_params:
         torch.manual_seed(123)
         im_size = shape[2:-1]
 
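Note: these hunks (and the matching ones in the test and library files below) only change formatting. Newer black releases, such as the black 24.10.0 pinned in setup.cfg, drop the redundant parentheses around tuple targets in for loops; the unpacking is unchanged. A tiny standalone sketch with made-up parameter values:

# Hypothetical triples shaped like the (shape, klength, is_complex) entries above.
test_params = [
    ((1, 3, 16, 16, 2), 24, False),
    ((1, 3, 8, 8, 8, 2), 18, True),
]

for (shape, klength, is_complex) in test_params:  # old formatting
    assert isinstance(is_complex, bool)

for shape, klength, is_complex in test_params:  # formatting after the black update
    print(shape, klength, is_complex)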

tests/test_interp.py (+1, -1)

@@ -19,7 +19,7 @@ def test_interp_accuracy():
     with open("tests/data/interp_data.pkl", "rb") as f:
         old_data = pickle.load(f)
 
-    for (image, ktraj, old_kdata) in old_data:
+    for image, ktraj, old_kdata in old_data:
         im_size = image.shape[2:-1]
 
         forw_ob = tkbn.KbInterp(im_size=im_size, grid_size=im_size)

tests/test_nufft.py (+1, -1)

@@ -18,7 +18,7 @@ def test_nufft_accuracy():
     with open("tests/data/nufft_data.pkl", "rb") as f:
         old_data = pickle.load(f)
 
-    for (image, ktraj, old_kdata) in old_data:
+    for image, ktraj, old_kdata in old_data:
         im_size = image.shape[2:-1]
 
         forw_ob = tkbn.KbNufft(im_size=im_size)

tests/test_sense_nufft.py (+1, -1)

@@ -13,7 +13,7 @@ def test_sense_nufft_accuracy():
     with open("tests/data/sense_nufft_data.pkl", "rb") as f:
         old_data = pickle.load(f)
 
-    for (image, ktraj, smaps, old_kdata) in old_data:
+    for image, ktraj, smaps, old_kdata in old_data:
         im_size = image.shape[2:-1]
 
         forw_ob = tkbn.KbNufft(im_size=im_size)

torchkbnufft/_nufft/fft.py (+3, -3)

@@ -26,7 +26,7 @@ def crop_dims(image: Tensor, dim_list: Tensor, end_list: Tensor) -> Tensor:
     """Crops an n-dimensional Tensor."""
     image = torch.view_as_real(image)  # index select only works for real
 
-    for (dim, end) in zip(dim_list, end_list):
+    for dim, end in zip(dim_list, end_list):
         image = torch.index_select(image, dim, torch.arange(end, device=image.device))
 
     return torch.view_as_complex(image)

@@ -64,7 +64,7 @@ def fft_and_scale(
 
     # zero pad for oversampled nufft
     pad_sizes: List[int] = []
-    for (gd, im) in zip(grid_size.flip((0,)), im_size.flip((0,))):
+    for gd, im in zip(grid_size.flip((0,)), im_size.flip((0,))):
         pad_sizes.append(0)
         pad_sizes.append(int(gd - im))
 

@@ -153,7 +153,7 @@ def fft_filter(image: Tensor, kernel: Tensor, norm: Optional[str] = "ortho") ->
     # set up n-dimensional zero pad
     # zero pad for oversampled nufft
     pad_sizes: List[int] = []
-    for (gd, im) in zip(grid_size.flip((0,)), im_size.flip((0,))):
+    for gd, im in zip(grid_size.flip((0,)), im_size.flip((0,))):
         pad_sizes.append(0)
         pad_sizes.append(int(gd - im))
 
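Not part of the change itself, but useful context for the loops touched here: pad_sizes is built over the flipped dimension order because torch.nn.functional.pad reads its padding list starting from the last dimension. A standalone sketch with made-up 2D sizes:

import torch
import torch.nn.functional as F

# Hypothetical image and oversampled grid sizes for a 2D NUFFT.
im_size = torch.tensor([64, 64])
grid_size = torch.tensor([128, 128])

# F.pad expects (last_dim_left, last_dim_right, next_dim_left, next_dim_right, ...),
# so the sizes are iterated in flipped order and padded on the right of each dim.
pad_sizes = []
for gd, im in zip(grid_size.flip((0,)), im_size.flip((0,))):
    pad_sizes.append(0)
    pad_sizes.append(int(gd - im))

image = torch.randn(1, 1, 64, 64)
padded = F.pad(image, pad_sizes)
print(padded.shape)  # torch.Size([1, 1, 128, 128])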

torchkbnufft/_nufft/interp.py (+10, -10)

@@ -215,7 +215,7 @@ def table_interp_multiple_batches(
 ) -> Tensor:
     """Table interpolation with for loop over batch dimension."""
     kdat = []
-    for (it_image, it_omega) in zip(image, omega):
+    for it_image, it_omega in zip(image, omega):
         kdat.append(
             table_interp_one_batch(
                 it_image.unsqueeze(0),

@@ -245,7 +245,7 @@ def table_interp_fork_over_batchdim(
     """Table interpolation with forking over k-space."""
     # initialize the fork processes
     futures: List[torch.jit.Future[torch.Tensor]] = []
-    for (image_chunk, omega_chunk) in zip(
+    for image_chunk, omega_chunk in zip(
         image.tensor_split(num_forks), omega.tensor_split(num_forks)
     ):
         futures.append(

@@ -409,11 +409,11 @@ def accum_tensor_index_add(
 ) -> Tensor:
     """We fork this function for the adjoint accumulation."""
     if batched_nufft:
-        for (image_batch, arr_ind_batch, data_batch) in zip(image, arr_ind, data):
-            for (image_coil, data_coil) in zip(image_batch, data_batch):
+        for image_batch, arr_ind_batch, data_batch in zip(image, arr_ind, data):
+            for image_coil, data_coil in zip(image_batch, data_batch):
                 image_coil.index_add_(0, arr_ind_batch, data_coil)
     else:
-        for (image_it, data_it) in zip(image, data):
+        for image_it, data_it in zip(image, data):
             image_it.index_add_(0, arr_ind, data_it)
 
     return image

@@ -427,7 +427,7 @@ def fork_and_accum(
     # initialize the fork processes
     futures: List[torch.jit.Future[torch.Tensor]] = []
     if batched_nufft:
-        for (image_chunk, arr_ind_chunk, data_chunk) in zip(
+        for image_chunk, arr_ind_chunk, data_chunk in zip(
             image.tensor_split(num_forks),
             arr_ind.tensor_split(num_forks),
             data.tensor_split(num_forks),

@@ -442,7 +442,7 @@ def fork_and_accum(
                 )
             )
     else:
-        for (image_chunk, data_chunk) in zip(
+        for image_chunk, data_chunk in zip(
             image.tensor_split(num_forks), data.tensor_split(num_forks)
         ):
             futures.append(

@@ -476,7 +476,7 @@ def calc_coef_and_indices_batch(
     """For loop coef calculation over batch dim."""
     coef = []
     arr_ind = []
-    for (tm_it, base_offset_it) in zip(tm, base_offset):
+    for tm_it, base_offset_it in zip(tm, base_offset):
         coef_it, arr_ind_it = calc_coef_and_indices(
             tm_it,
             base_offset_it,

@@ -511,7 +511,7 @@ def calc_coef_and_indices_fork_over_batches(
     if batched_nufft:
         # initialize the fork processes
         futures: List[torch.jit.Future[Tuple[Tensor, Tensor]]] = []
-        for (tm_chunk, base_offset_chunk) in zip(
+        for tm_chunk, base_offset_chunk in zip(
             tm.tensor_split(num_forks),
             base_offset.tensor_split(num_forks),
         ):

@@ -570,7 +570,7 @@ def sort_data(
     if batched_nufft:
         # loop over batch dimension to get sorted k-space
        results: List[Tuple[Tensor, Tensor, Tensor]] = []
-        for (tm_it, omega_it, data_it) in zip(tm, omega, data):
+        for tm_it, omega_it, data_it in zip(tm, omega, data):
             results.append(
                 sort_one_batch(tm_it, omega_it, data_it.unsqueeze(0), grid_size)
             )
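The renamed loop variables above all feed the same batching pattern: chunks along the batch dimension are dispatched with torch.jit.fork and gathered with torch.jit.wait. A simplified sketch of that pattern, using a stand-in worker rather than the library's actual interpolation routine:

from typing import List

import torch


def _work(chunk: torch.Tensor) -> torch.Tensor:
    # Stand-in for a per-chunk computation such as table interpolation.
    return chunk * 2


def fork_over_batchdim(data: torch.Tensor, num_forks: int) -> torch.Tensor:
    # Launch one asynchronous task per chunk of the batch dimension.
    futures: List[torch.jit.Future[torch.Tensor]] = []
    for chunk in data.tensor_split(num_forks):
        futures.append(torch.jit.fork(_work, chunk))
    # Wait for every task and reassemble along the batch dimension.
    return torch.cat([torch.jit.wait(f) for f in futures])


out = fork_over_batchdim(torch.randn(8, 4), num_forks=4)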

torchkbnufft/_nufft/spmat.py (+2, -2)

@@ -98,8 +98,8 @@ def calc_tensor_spmatrix(
     shape = coo.shape
 
     interp_mats = (
-        torch.sparse.FloatTensor(inds, real_vals, torch.Size(shape)),  # type: ignore
-        torch.sparse.FloatTensor(inds, imag_vals, torch.Size(shape)),  # type: ignore
+        torch.sparse_coo_tensor(inds, real_vals, torch.Size(shape)),  # type: ignore
+        torch.sparse_coo_tensor(inds, imag_vals, torch.Size(shape)),  # type: ignore
     )
 
     return interp_mats
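This is the "Resolve deprecation warning" item from the commit message: the legacy torch.sparse.FloatTensor constructor is swapped for torch.sparse_coo_tensor. A minimal standalone example of the replacement call, using toy indices and values rather than the real interpolation matrix:

import torch

# Toy COO data: two nonzero entries in a 3 x 4 matrix.
inds = torch.tensor([[0, 2], [1, 3]])  # row 0 of inds = row indices, row 1 = column indices
real_vals = torch.tensor([0.5, -1.0])
shape = (3, 4)

# Deprecated: torch.sparse.FloatTensor(inds, real_vals, torch.Size(shape))
# Current API:
spmat = torch.sparse_coo_tensor(inds, real_vals, torch.Size(shape))
print(spmat.to_dense())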

torchkbnufft/modules/kbnufft.py (+3, -3)

@@ -445,7 +445,7 @@ def toep_batch_loop(
     if len(kernel.shape) > len(image.shape[2:]):
         # run with batching for kernel
         if smaps.shape[0] == 1:
-            for (mini_image, mini_kernel) in zip(image, kernel):
+            for mini_image, mini_kernel in zip(image, kernel):
                 mini_image = mini_image.unsqueeze(0) * smaps
                 mini_image = tkbnF.fft_filter(
                     image=mini_image, kernel=mini_kernel, norm=norm

@@ -457,7 +457,7 @@
                 )
                 output.append(mini_image.squeeze(0))
         else:
-            for (mini_image, smap, mini_kernel) in zip(image, smaps, kernel):
+            for mini_image, smap, mini_kernel in zip(image, smaps, kernel):
                 mini_image = mini_image.unsqueeze(0) * smap.unsqueeze(0)
                 mini_image = tkbnF.fft_filter(
                     image=mini_image, kernel=mini_kernel, norm=norm

@@ -469,7 +469,7 @@
                 )
                 output.append(mini_image.squeeze(0))
     else:
-        for (mini_image, smap) in zip(image, smaps):
+        for mini_image, smap in zip(image, smaps):
            mini_image = mini_image.unsqueeze(0) * smap.unsqueeze(0)
            mini_image = tkbnF.fft_filter(
                image=mini_image, kernel=kernel, norm=norm
