File tree 3 files changed +13
-7
lines changed
3 files changed +13
-7
lines changed Original file line number Diff line number Diff line change @@ -1262,6 +1262,9 @@ def cluster_filtering_hiccups(
1262
1262
# large helper functions wrapping smaller step-specific ones
1263
1263
####################################################################
1264
1264
1265
+ def _compose_score_hist(tile, to_score, to_hist):
1266
+     return to_hist(to_score(tile))
1267
+
1265
1268
@pool_decorator
1266
1269
def scoring_and_histogramming_step (
1267
1270
clr ,
@@ -1300,7 +1303,7 @@ def scoring_and_histogramming_step(
1300
1303
to_hist = partial(histogram_scored_pixels, kernels=kernels, ledges=ledges)
1301
1304
1302
1305
# compose scoring and histogramming together :
1303
- job = lambda tile: to_hist(to_score(tile))
1306
+ job = partial(_compose_score_hist, to_score=to_score, to_hist=to_hist)
1304
1307
1305
1308
# standard multiprocessing implementation
1306
1309
if nproc > 1 :
@@ -1388,7 +1391,7 @@ def scoring_and_extraction_step(
1388
1391
)
1389
1392
1390
1393
# compose scoring and histogramming together
1391
- job = lambda tile: to_extract(to_score(tile))
1394
+ job = partial(_compose_score_hist, to_score=to_score, to_hist=to_extract)
1392
1395
1393
1396
# standard multiprocessing implementation
1394
1397
if nproc > 1 :
Original file line number Diff line number Diff line change @@ -1035,6 +1035,10 @@ def per_region_smooth_cvd(
1035
1035
)
1036
1036
1037
1037
return cvd
1038
+
1039
+ def _balance_transform(p, weight1, weight2):
1040
+     return p["count"] * p[weight1] * p[weight2]
1041
+
1038
1042
# user-friendly wrapper for diagsum_symm and diagsum_pairwise - part of new "public" API
1039
1043
@pool_decorator
1040
1044
def expected_cis (
@@ -1179,7 +1183,7 @@ def expected_cis(
1179
1183
# define balanced data transform:
1180
1184
weight1 = clr_weight_name + "1"
1181
1185
weight2 = clr_weight_name + "2"
1182
- transforms = {"balanced": lambda p: p["count"] * p[weight1] * p[weight2]}
1186
+ transforms = {"balanced": partial(_balance_transform, weight1=weight1, weight2=weight2)}
1183
1187
else :
1184
1188
raise ValueError (
1185
1189
"cooler is not balanced, or"
@@ -1317,8 +1321,7 @@ def expected_trans(
1317
1321
# define balanced data transform:
1318
1322
weight1 = clr_weight_name + "1"
1319
1323
weight2 = clr_weight_name + "2"
1320
- transforms = {"balanced": lambda p: p["count"] * p[weight1] * p[weight2]}
1321
-
1324
+ transforms = {"balanced": partial(_balance_transform, weight1=weight1, weight2=weight2)}
1322
1325
else :
1323
1326
raise ValueError (
1324
1327
"cooler is not balanced, or"
Original file line number Diff line number Diff line change 2
2
import numpy as np
3
3
import pandas as pd
4
4
import bioframe
5
- from multiprocess import Pool
5
+ from multiprocessing import Pool
6
6
from functools import wraps
7
7
import logging
8
8
@@ -526,7 +526,7 @@ def wrapper(*args, **kwargs):
526
526
# If alternative or third party map functors are provided
527
527
if "map_functor" in kwargs.keys():
528
528
logging.info(f"using an alternative map functor: {kwargs['map_functor']}")
529
- return func(*args, **kwargs, map_functor=kwargs["map_functor"])
529
+ return func(*args, **kwargs)
530
530
531
531
pool = None
532
532
if "nproc" in kwargs .keys ():
You can’t perform that action at this time.
0 commit comments