Merge pull request #20 from SyneRBI/main-backend
main (submission) & petric (backend) framework
Showing 9 changed files with 268 additions and 174 deletions.
BSREM example script for the NeuroLF Hoffman dataset:

@@ -1,3 +1,17 @@
-from BSREM_common import run
+from petric import MetricsWithTimeout, get_data
+from sirf.contrib.BSREM.BSREM import BSREM1
+from sirf.contrib.partitioner import partitioner
 
-run(num_subsets=16, transverse_slice=72)
+data = get_data(srcdir="./data/NeuroLF_Hoffman_Dataset", outdir="./output/BSREM_NeuroLF_Hoffman")
+num_subsets = 16
+data_sub, acq_models, obj_funs = partitioner.data_partition(data.acquired_data, data.additive_term, data.mult_factors,
+                                                            num_subsets, initial_image=data.OSEM_image)
+# WARNING: modifies prior strength with 1/num_subsets (as currently needed for BSREM implementations)
+data.prior.set_penalisation_factor(data.prior.get_penalisation_factor() / len(obj_funs))
+data.prior.set_up(data.OSEM_image)
+for f in obj_funs:  # add prior evenly to every objective function
+    f.set_prior(data.prior)
+
+algo = BSREM1(data_sub, obj_funs, initial=data.OSEM_image, initial_step_size=.3, relaxation_eta=.01,
+              update_objective_interval=10)
+algo.run(5000, callbacks=[MetricsWithTimeout(transverse_slice=72)])
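Why the penalisation factor is divided by len(obj_funs): the same prior object is attached to each of the num_subsets partial objective functions (see the loop above), so scaling it by 1/num_subsets keeps the total prior strength over one full pass through all subsets equal to the original value, which the WARNING comment notes is what the current BSREM implementations expect. A minimal numeric sketch of that bookkeeping (illustrative values, not part of the commit):

# Illustrative only: prior-rescaling bookkeeping (values are hypothetical).
beta = 1.0                            # original penalisation factor
num_subsets = 16                      # as in the script above
per_subset_beta = beta / num_subsets  # value passed to set_penalisation_factor()
# each objective function carries per_subset_beta, so one full pass applies
# num_subsets * per_subset_beta == beta, leaving the overall prior strength unchanged
assert abs(per_subset_beta * num_subsets - beta) < 1e-12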
BSREM example script for the Siemens Vision600 thorax dataset:

@@ -1,3 +1,17 @@
-from BSREM_common import run
+from petric import MetricsWithTimeout, get_data
+from sirf.contrib.BSREM.BSREM import BSREM1
+from sirf.contrib.partitioner import partitioner
 
-run(num_subsets=5, transverse_slice=None)
+data = get_data(srcdir="./data/Siemens_Vision600_thorax", outdir="./output/BSREM_Vision600_thorax")
+num_subsets = 5
+data_sub, acq_models, obj_funs = partitioner.data_partition(data.acquired_data, data.additive_term, data.mult_factors,
+                                                            num_subsets, initial_image=data.OSEM_image)
+# WARNING: modifies prior strength with 1/num_subsets (as currently needed for BSREM implementations)
+data.prior.set_penalisation_factor(data.prior.get_penalisation_factor() / len(obj_funs))
+data.prior.set_up(data.OSEM_image)
+for f in obj_funs:  # add prior evenly to every objective function
+    f.set_prior(data.prior)
+
+algo = BSREM1(data_sub, obj_funs, initial=data.OSEM_image, initial_step_size=.3, relaxation_eta=.01,
+              update_objective_interval=10)
+algo.run(5000, callbacks=[MetricsWithTimeout()])
This file was deleted (its diff is not shown).
BSREM example script for the Siemens mMR NEMA IQ dataset:

@@ -1,3 +1,17 @@
-from BSREM_common import run
+from petric import MetricsWithTimeout, get_data
+from sirf.contrib.BSREM.BSREM import BSREM1
+from sirf.contrib.partitioner import partitioner
 
-run(num_subsets=7, transverse_slice=72, coronal_slice=109)
+data = get_data(srcdir="./data/Siemens_mMR_NEMA_IQ", outdir="./output/BSREM_mMR_NEMA_IQ")
+num_subsets = 7
+data_sub, acq_models, obj_funs = partitioner.data_partition(data.acquired_data, data.additive_term, data.mult_factors,
+                                                            num_subsets, initial_image=data.OSEM_image)
+# WARNING: modifies prior strength with 1/num_subsets (as currently needed for BSREM implementations)
+data.prior.set_penalisation_factor(data.prior.get_penalisation_factor() / len(obj_funs))
+data.prior.set_up(data.OSEM_image)
+for f in obj_funs:  # add prior evenly to every objective function
+    f.set_prior(data.prior)
+
+algo = BSREM1(data_sub, obj_funs, initial=data.OSEM_image, initial_step_size=.3, relaxation_eta=.01,
+              update_objective_interval=10)
+algo.run(5000, callbacks=[MetricsWithTimeout(transverse_slice=72, coronal_slice=109)])
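The three example scripts above differ only in the data/output directories, the number of subsets, and the slices passed to MetricsWithTimeout. A hypothetical helper (not part of this commit) could factor out the shared pattern; it uses only the petric and sirf.contrib calls already shown in the diffs:

from petric import MetricsWithTimeout, get_data
from sirf.contrib.BSREM.BSREM import BSREM1
from sirf.contrib.partitioner import partitioner


def run_bsrem_example(srcdir, outdir, num_subsets, max_iterations=5000, **slices):
    # Hypothetical helper reproducing the pattern shared by the three scripts above.
    data = get_data(srcdir=srcdir, outdir=outdir)
    data_sub, acq_models, obj_funs = partitioner.data_partition(
        data.acquired_data, data.additive_term, data.mult_factors,
        num_subsets, initial_image=data.OSEM_image)
    # WARNING: modifies prior strength with 1/num_subsets (as currently needed for BSREM implementations)
    data.prior.set_penalisation_factor(data.prior.get_penalisation_factor() / len(obj_funs))
    data.prior.set_up(data.OSEM_image)
    for f in obj_funs:  # add prior evenly to every objective function
        f.set_prior(data.prior)
    algo = BSREM1(data_sub, obj_funs, initial=data.OSEM_image, initial_step_size=.3,
                  relaxation_eta=.01, update_objective_interval=10)
    algo.run(max_iterations, callbacks=[MetricsWithTimeout(**slices)])


# e.g. the NeuroLF Hoffman script above would reduce to:
# run_bsrem_example("./data/NeuroLF_Hoffman_Dataset", "./output/BSREM_NeuroLF_Hoffman",
#                   num_subsets=16, transverse_slice=72)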
main.py (submission template):

@@ -1,32 +1,52 @@
-"""Usage in notebook.ipynb:
-from main import Submission, submission_callbacks
-from cil.optimisation.utilities import callbacks
-data = "TODO"
-metrics = [callbacks.ProgressCallback()]
-algorithm = Submission(data)
-algorithm.run(np.inf, callbacks=metrics + submission_callbacks)
+"""Main file to modify for submissions. It is used by e.g. example.ipynb and petric.py as follows:
+
+>>> from main import Submission, submission_callbacks
+>>> from petric import data, metrics
+>>> algorithm = Submission(data)
+>>> algorithm.run(np.inf, callbacks=metrics + submission_callbacks)
 """
-from cil.optimisation.algorithms import GD
-from cil.optimisation.utilities.callbacks import Callback
+from cil.optimisation.algorithms import Algorithm
+from cil.optimisation.utilities import callbacks
+from petric import Dataset
+from sirf.contrib.BSREM.BSREM import BSREM1
+from sirf.contrib.partitioner import partitioner
 
+assert issubclass(BSREM1, Algorithm)
 
-class EarlyStopping(Callback):
-    def __call__(self, algorithm):
-        if algorithm.x <= -15:  # arbitrary stopping criterion
-            raise StopIteration
 
+class MaxIteration(callbacks.Callback):
+    """
+    The organisers try to `Submission(data).run(inf)` i.e. for infinite iterations (until timeout).
+    This callback forces stopping after `max_iteration` instead.
+    """
+    def __init__(self, max_iteration: int, verbose: int = 1):
+        super().__init__(verbose)
+        self.max_iteration = max_iteration
 
-submission_callbacks = [EarlyStopping()]
+    def __call__(self, algorithm: Algorithm):
+        if algorithm.iteration >= self.max_iteration:
+            raise StopIteration
 
 
-class Submission(GD):
-    def __init__(self, data, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        # Your code here
-        self.data = data
+class Submission(BSREM1):
+    # note that `issubclass(BSREM1, Algorithm) == True`
+    def __init__(self, data: Dataset, num_subsets: int = 7, update_objective_interval: int = 10):
+        """
+        Initialisation function, setting up data & (hyper)parameters.
+        NB: in practice, `num_subsets` should likely be determined from the data.
+        This is just an example. Try to modify and improve it!
+        """
+        data_sub, acq_models, obj_funs = partitioner.data_partition(data.acquired_data, data.additive_term,
+                                                                    data.mult_factors, num_subsets,
+                                                                    initial_image=data.OSEM_image)
+        # WARNING: modifies prior strength with 1/num_subsets (as currently needed for BSREM implementations)
+        data.prior.set_penalisation_factor(data.prior.get_penalisation_factor() / len(obj_funs))
+        data.prior.set_up(data.OSEM_image)
+        for f in obj_funs:  # add prior evenly to every objective function
+            f.set_prior(data.prior)
 
-    def update(self):
-        # Your code here
-        return super().update()
+        super().__init__(data_sub, obj_funs, initial=data.OSEM_image, initial_step_size=.3, relaxation_eta=.01,
+                         update_objective_interval=update_objective_interval)
+
+
+submission_callbacks = [MaxIteration(660)]
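A minimal sketch of running the new Submission locally (not part of this commit), combining the usage shown in the main.py docstring with the mMR NEMA IQ example above; the data path and slice indices are taken from that script, and the output directory name is illustrative:

import numpy as np

from main import Submission, submission_callbacks
from petric import MetricsWithTimeout, get_data

# outdir below is illustrative; srcdir and slices match the mMR NEMA IQ example script
data = get_data(srcdir="./data/Siemens_mMR_NEMA_IQ", outdir="./output/submission_mMR_NEMA_IQ")
algo = Submission(data, num_subsets=7, update_objective_interval=10)
# The organisers call run(np.inf) and rely on timeouts; MaxIteration(660) in submission_callbacks stops earlier.
algo.run(np.inf, callbacks=[MetricsWithTimeout(transverse_slice=72, coronal_slice=109)] + submission_callbacks)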