Skip to content

Commit 01a362f

Browse files
ppwwyyxx authored and facebook-github-bot committed
get_detection_dataset_dicts(names=)
Summary: simplify the argument name; otherwise `dataloader.train.datasets.dataset_names` is long and repetitive. Reviewed By: theschnitz. Differential Revision: D26667042. fbshipit-source-id: 92803ef87f4396513376f8a98d6787b0b424c10a
1 parent ab70550 commit 01a362f

File tree

1 file changed

+12
-14
lines changed

1 file changed

+12
-14
lines changed

detectron2/data/build.py

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -206,32 +206,30 @@ def short_name(x):
206206
)
207207

208208

209-
def get_detection_dataset_dicts(
210-
dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
211-
):
209+
def get_detection_dataset_dicts(names, filter_empty=True, min_keypoints=0, proposal_files=None):
212210
"""
213211
Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
214212
215213
Args:
216-
dataset_names (str or list[str]): a dataset name or a list of dataset names
214+
names (str or list[str]): a dataset name or a list of dataset names
217215
filter_empty (bool): whether to filter out images without instance annotations
218216
min_keypoints (int): filter out images with fewer keypoints than
219217
`min_keypoints`. Set to 0 to do nothing.
220218
proposal_files (list[str]): if given, a list of object proposal files
221-
that match each dataset in `dataset_names`.
219+
that match each dataset in `names`.
222220
223221
Returns:
224222
list[dict]: a list of dicts following the standard dataset dict format.
225223
"""
226-
if isinstance(dataset_names, str):
227-
dataset_names = [dataset_names]
228-
assert len(dataset_names)
229-
dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
230-
for dataset_name, dicts in zip(dataset_names, dataset_dicts):
224+
if isinstance(names, str):
225+
names = [names]
226+
assert len(names), names
227+
dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
228+
for dataset_name, dicts in zip(names, dataset_dicts):
231229
assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
232230

233231
if proposal_files is not None:
234-
assert len(dataset_names) == len(proposal_files)
232+
assert len(names) == len(proposal_files)
235233
# load precomputed proposals from proposal files
236234
dataset_dicts = [
237235
load_proposals_into_dataset(dataset_i_dicts, proposal_file)
@@ -248,13 +246,13 @@ def get_detection_dataset_dicts(
248246

249247
if has_instances:
250248
try:
251-
class_names = MetadataCatalog.get(dataset_names[0]).thing_classes
252-
check_metadata_consistency("thing_classes", dataset_names)
249+
class_names = MetadataCatalog.get(names[0]).thing_classes
250+
check_metadata_consistency("thing_classes", names)
253251
print_instances_class_histogram(dataset_dicts, class_names)
254252
except AttributeError: # class names are not available for this dataset
255253
pass
256254

257-
assert len(dataset_dicts), "No valid data found in {}.".format(",".join(dataset_names))
255+
assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
258256
return dataset_dicts
259257

260258

0 commit comments

Comments (0)