-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path: dask_test_debug2.py
More file actions
194 lines (163 loc) · 5.57 KB
/
dask_test_debug2.py
File metadata and controls
194 lines (163 loc) · 5.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
import os
import pickle
import shutil
import time
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from datetime import timedelta
from functools import partial
import hydra
from omegaconf import DictConfig, open_dict
import joblib
import cc3d
import configargparse
import fastremap
import mwatershed
import numpy as np
import pandas as pd
import zarr
from dask import config as dask_cfg
from dask_jobqueue import SLURMCluster
from dask.distributed import Client, LocalCluster, as_completed
from tqdm import tqdm
from numba import jit
from datetime import datetime
from metrics import compute_metrics
# this only changes the configuration in the local process, and not subprocesses (like remote workers)
dask_cfg.set(
{
"distributed.scheduler.worker-ttl": "1h", # required because mwatershed blocks for a long time
# generous comm timeouts: workers can be unresponsive for long stretches while computing
"distributed.comm.timeouts.connect": "1h",
"distributed.comm.timeouts.tcp": "1h",
"distributed.admin.tick.limit": "30s" # increase time before triggering a warning (default limit of 3s remains in workers - https://github.com/dask/distributed/issues/3882)
}
)
def chunk_list(list_to_chunk, chunk_size):  # TODO: use a dask bag instead?
    """Split *list_to_chunk* into consecutive sublists of at most *chunk_size* items.

    The last chunk may be shorter than *chunk_size*; an empty input yields [].
    """
    starts = range(0, len(list_to_chunk), chunk_size)
    return [list_to_chunk[start:start + chunk_size] for start in starts]
def patched_thresholding(aff, conf):
    """Run the patched (blockwise) thresholding debug pipeline.

    Loads the pre-computed patched segmentation from
    ``conf.debug_patched_seg_path`` and relabels it globally so that object
    IDs agree across patch boundaries.

    Args:
        aff: zarr array of affinities; only its spatial shape (axes 1..3) is
            used to build the patch grid.
        conf: hydra/omegaconf config; must provide ``patch_size`` and
            ``debug_patched_seg_path``.

    Returns:
        The result of ``relabel_globally``.  (Previously the result was
        assigned to a local and discarded via a bare ``return``.)
    """
    ijk_to_idx, patch_to_coords = get_mappings(aff, conf.patch_size)
    patched_seg = zarr.open(conf.debug_patched_seg_path, mode="r")
    # No per-fragment agglomeration is supplied in this debug path.
    fragment_agglomeration_flattened = None
    return relabel_globally(
        fragment_agglomeration_flattened,
        patched_seg,
        aff,
        conf,
        patch_to_coords,
        ijk_to_idx,
    )
def get_mappings(aff, patch_size):
    """
    Build the patch-grid mappings for an affinity volume.

    Patches tile the three spatial axes of ``aff`` (axes 1..3) with stride
    ``patch_size``.  ``ijk_to_idx`` flattens a patch's (i, j, k) grid position
    into a single linear index (i-major, then j, then k).  ``patch_to_coords``
    maps (i, j, k) to ((x0, x1), (y0, y1), (z0, z1)) voxel ranges, where the
    upper bound is None for the last patch along an axis ("until the end").
    """
    xs, ys, zs = (
        list(range(0, aff.shape[axis], patch_size)) for axis in (1, 2, 3)
    )
    grid = [
        (i, j, k)
        for i in range(len(xs))
        for j in range(len(ys))
        for k in range(len(zs))
    ]
    ijk_to_idx = {(i, j, k): (i * len(ys) + j) * len(zs) + k for i, j, k in grid}

    def _bounds(starts, pos):
        # Half-open range of one axis; None means "to the end of the volume".
        upper = starts[pos + 1] if pos + 1 < len(starts) else None
        return (starts[pos], upper)

    patch_to_coords = {
        (i, j, k): (_bounds(xs, i), _bounds(ys, j), _bounds(zs, k))
        for i, j, k in grid
    }
    return ijk_to_idx, patch_to_coords
def dummy_task(x, **kwargs):
    """Placeholder worker task: log start/end, block for 20 s, echo *x* back.

    Arbitrary keyword arguments are accepted (and ignored) so the task can be
    mapped with the same kwargs as the real relabeling task.
    """
    import time
    from datetime import datetime

    print(datetime.now(), "started a dummy task", flush=True)
    time.sleep(20)
    print(datetime.now(), "ending a dummy task", flush=True)
    return x
def relabel_globally(
    fragment_agglomeration_flattened,
    patched_seg,
    aff,
    conf,
    patch_to_coords,
    ijk_to_idx,
    n_tasks=17745,
):
    """
    Unite indexing within multiple cubes - if an object spans multiple cubes,
    it should have the same index everywhere.

    Spins up an adaptive SLURM cluster and maps a (currently dummy) relabeling
    task over ``n_tasks`` chunk indices, collecting the per-task results.

    Args:
        fragment_agglomeration_flattened: unused in this debug version.
        patched_seg: zarr array with the per-patch segmentation, forwarded to
            the task.
        aff: affinity volume (unused in this debug version).
        conf: config providing ``path_root``.
        patch_to_coords: (i, j, k) -> voxel-range mapping from get_mappings.
        ijk_to_idx: (i, j, k) -> linear index mapping from get_mappings;
            scattered to workers once via broadcast.
        n_tasks: number of chunk tasks to schedule.  Default matches the
            dataset this debug script was written for (was a hard-coded
            magic number used in two places).

    Returns:
        List of per-task results in completion order (the dummy task echoes
        its input index).  NOTE(review): the original returned the undefined
        name ``agglomerated_seg`` and raised NameError after all tasks ran.
    """
    print("Global relabeling...")
    cubes = list(patch_to_coords.items())
    cubes_batched = chunk_list(cubes, 5)  # currently unused by the dummy task
    cluster = SLURMCluster(
        cores=16,
        memory="500GB",
        processes=1,
        worker_extra_args=["--resources", "processes=1"],
        log_directory="/cajal/scratch/projects/misc/zuzur/slurm_logs/relabel/",
        walltime="12:00:00",
    )
    cluster.adapt(minimum_jobs=1, maximum_jobs=32)
    with Client(cluster) as client:
        print(datetime.now(), "Dask relabeling Client Dashboard:", client.dashboard_link)
        start_time = time.time()
        # Broadcast the (large) mapping once instead of serializing it into
        # every task's closure.
        ijk_to_idx_future = client.scatter(ijk_to_idx, broadcast=True)
        print(datetime.now(), "scatttered")
        futures = client.map(
            partial(
                dummy_task,
                patched_seg=patched_seg,
                fragment_agglomeration_chunks_path=f"{conf.path_root}/agglo_pkl_chunks",
                ijk_to_idx=ijk_to_idx_future,
                agglomerated_seg="agglomerated_seg",
                conf=conf,
            ),
            range(n_tasks),
            resources={'processes': 1},
        )
        # Collect results as they finish; tqdm doubles as a progress bar.
        agglomerated_seg = []
        for future in tqdm(as_completed(futures), total=n_tasks, smoothing=0):
            agglomerated_seg.append(future.result())
        print(f"Relabeling fragments took {timedelta(seconds=int(time.time() - start_time))}")
    return agglomerated_seg
def main(conf):
    """Set up the run directory for the first configured threshold and run
    the patched thresholding pipeline, reporting wall-clock time."""
    print(conf)
    aff = zarr.open(conf.aff_path, mode="r")
    thr = conf.thresholds[0]
    path_root = f"{conf.path_base}/{f'thr_{thr}'}/"
    print(f"Root path: {path_root}")
    os.makedirs(path_root, exist_ok=True)
    # Record the derived values on the (normally read-only) hydra config.
    with open_dict(conf):
        conf.path_root = path_root
        conf.thr = thr
    t0 = time.time()
    patched_thresholding(aff, conf)
    elapsed = timedelta(seconds=int(time.time() - t0))
    print(f"Patched thresholding took {elapsed}")
    return
def str2bool(v):
    """Parse a boolean CLI flag value, accepting common yes/no spellings.

    Booleans pass through unchanged; unrecognized strings raise
    configargparse.ArgumentTypeError.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise configargparse.ArgumentTypeError('Boolean value expected.')
@hydra.main(config_path="/cajal/u/zuzur/topo/segmentation/", config_name="patched_inference")
def main_wrapper(conf: DictConfig):
    # Thin hydra entry point: loads the "patched_inference" config from the
    # hard-coded config_path and delegates to main().
    return main(conf)


if __name__ == "__main__":
    main_wrapper()