Skip to content

Commit 797ce34

Browse files
committed
[temporal] [cont] Fix errors
[test] Add the tests for the instantiation of abstract evaluator 1 -- 3 [test] Add the tests for util 1 -- 2 [test] Add the tests for train_evaluator 1 -- 2 [refactor] [test] Clean up the pipeline classes and add tests for them 1 -- 2 [test] Add the tests for tae 1 -- 4 [fix] Fix an error due to the change in extract learning curve [experimental] Increase the coverage [test] Add tests for pipeline repr Since the modifications in the tests removed the coverage on pipeline repr, I added tests to restore coverage of those parts. The decrease in coverage happened because dummy pipelines were used in the tests.
1 parent 98bd007 commit 797ce34

16 files changed

+769
-279
lines changed

autoPyTorch/api/base_task.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1046,7 +1046,7 @@ def _search(
10461046
DisableFileOutputParameters.y_opt in self._disable_file_output
10471047
and self.ensemble_size > 1
10481048
):
1049-
self._logger.warning(f"No ensemble will be created when {DisableFileOutputParameters.y_optimization}"
1049+
self._logger.warning(f"No ensemble will be created when {DisableFileOutputParameters.y_opt}"
10501050
f" is in disable_file_output")
10511051

10521052
self._memory_limit = memory_limit

autoPyTorch/evaluation/abstract_evaluator.py

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -261,6 +261,9 @@ def _init_miscellaneous(self) -> None:
261261
self.predict_function = self._predict_proba
262262
263263
self.X_train, self.y_train = datamanager.train_tensors
264+
self.unique_train_labels = [
265+
list(np.unique(self.y_train[train_indices])) for train_indices, _ in self.splits
266+
]
264267
self.X_valid, self.y_valid, self.X_test, self.y_test = None, None, None, None
265268
if datamanager.val_tensors is not None:
266269
self.X_valid, self.y_valid = datamanager.val_tensors
@@ -383,7 +386,7 @@ def predict(
383386
self,
384387
X: Optional[np.ndarray],
385388
pipeline: BaseEstimator,
386-
label_examples: Optional[np.ndarray] = None
389+
unique_train_labels: Optional[List[int]] = None
387390
) -> Optional[np.ndarray]:
388391
"""
389392
A wrapper function to handle the prediction of regression or classification tasks.
@@ -393,7 +396,8 @@ def predict(
393396
A set of features to feed to the pipeline
394397
pipeline (BaseEstimator):
395398
A model that will take the features X return a prediction y
396-
label_examples (Optional[np.ndarray]):
399+
unique_train_labels (Optional[List[int]]):
400+
The unique labels included in the train split.
397401

398402
Returns:
399403
(np.ndarray):
@@ -417,7 +421,7 @@ def predict(
417421
prediction=pred,
418422
num_classes=self.num_classes,
419423
output_type=self.output_type,
420-
label_examples=label_examples
424+
unique_train_labels=unique_train_labels
421425
)
422426
423427
return pred
@@ -441,6 +445,10 @@ def _get_pipeline(self) -> BaseEstimator:
441445
A scikit-learn compliant pipeline which is not yet fit to the data.
442446
"""
443447
config = self.evaluator_params.configuration
448+
if not isinstance(config, (int, str, Configuration)):
449+
raise TypeError("The type of configuration must be either (int, str, Configuration), "
450+
f"but got type {type(config)}")
451+
444452
kwargs = dict(
445453
config=config,
446454
random_state=np.random.RandomState(self.fixed_pipeline_params.seed),
@@ -458,9 +466,6 @@ def _get_pipeline(self) -> BaseEstimator:
458466
exclude=self.fixed_pipeline_params.exclude,
459467
search_space_updates=self.fixed_pipeline_params.search_space_updates,
460468
**kwargs)
461-
else:
462-
raise ValueError("The type of configuration must be either (int, str, Configuration), "
463-
f"but got type {type(config)}")
464469
465470
def _loss(self, labels: np.ndarray, preds: np.ndarray) -> Dict[str, float]:
466471
"""SMAC follows a minimization goal, so the make_scorer

0 commit comments

Comments
 (0)