From eff30e2130868b46c9077dc7fe7c34f6488c7c98 Mon Sep 17 00:00:00 2001 From: Zach Mueller Date: Mon, 11 Dec 2023 09:58:12 -0500 Subject: [PATCH] Fix nb tests (#2230) * Fix nb tests * INclude bnb import * pprint * Try this time * greater than zero * Fix test * bnb * Clean --- src/accelerate/launchers.py | 2 +- .../test_utils/scripts/test_notebook.py | 10 ++++++++-- src/accelerate/utils/environment.py | 2 +- tests/test_cli.py | 12 ++++++++++++ tests/test_multigpu.py | 15 --------------- 5 files changed, 22 insertions(+), 19 deletions(-) diff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py index b01f6b9f98e..d92cd7ed553 100644 --- a/src/accelerate/launchers.py +++ b/src/accelerate/launchers.py @@ -151,7 +151,7 @@ def train(*args): ) # Check for specific libraries known to initialize CUDA that users constantly use problematic_imports = are_libraries_initialized("bitsandbytes") - if len(problematic_imports) > 1: + if len(problematic_imports) > 0: err = ( "Could not start distributed process. Libraries known to initialize CUDA upon import have been " "imported already. 
Please keep these imports inside your training function to try and help with this:" diff --git a/src/accelerate/test_utils/scripts/test_notebook.py b/src/accelerate/test_utils/scripts/test_notebook.py index 999fab34cbe..1f2f1bbcf65 100644 --- a/src/accelerate/test_utils/scripts/test_notebook.py +++ b/src/accelerate/test_utils/scripts/test_notebook.py @@ -13,7 +13,7 @@ def basic_function(): print(f"PartialState:\n{PartialState()}") -NUM_PROCESSES = os.environ.get("ACCELERATE_NUM_PROCESSES", 1) +NUM_PROCESSES = int(os.environ.get("ACCELERATE_NUM_PROCESSES", 1)) def test_can_initialize(): @@ -22,7 +22,9 @@ def test_can_initialize(): @require_bnb def test_problematic_imports(): - with raises(AssertionError, match="Please keep these imports"): + with raises(RuntimeError, match="Please keep these imports"): + import bitsandbytes as bnb # noqa: F401 + notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES) @@ -32,3 +34,7 @@ def main(): if is_bnb_available(): print("Test problematic imports (bnb)") test_problematic_imports() + + +if __name__ == "__main__": + main() diff --git a/src/accelerate/utils/environment.py b/src/accelerate/utils/environment.py index 3de72e51c15..12169b8d852 100644 --- a/src/accelerate/utils/environment.py +++ b/src/accelerate/utils/environment.py @@ -59,7 +59,7 @@ def are_libraries_initialized(*library_names: str) -> Dict[str, bool]: """ Checks if any of `library_names` are imported in the environment. Will return results as a `key:bool` pair. 
""" - return [lib_name for lib_name in library_names if lib_name in sys.modules] + return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()] def get_gpu_info(): diff --git a/tests/test_cli.py b/tests/test_cli.py index ccf2a30f62d..9a8b83c5586 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -24,6 +24,7 @@ from accelerate.commands.estimate import estimate_command, estimate_command_parser, gather_data from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import ( + require_multi_gpu, require_timm, require_transformers, run_command, @@ -40,6 +41,7 @@ class AccelerateLauncherTester(unittest.TestCase): mod_file = inspect.getfile(accelerate.test_utils) test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"]) + notebook_launcher_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_notebook.py"]) base_cmd = ["accelerate", "launch"] config_folder = Path.home() / ".cache/huggingface/accelerate" @@ -87,6 +89,16 @@ def test_invalid_keys(self): def test_accelerate_test(self): execute_subprocess_async(["accelerate", "test"], env=os.environ.copy()) + @require_multi_gpu + def test_notebook_launcher(self): + """ + This test checks a variety of situations and scenarios + with the `notebook_launcher` + """ + cmd = ["python", self.notebook_launcher_path] + with patch_environment(omp_num_threads=1, accelerate_num_processes=2): + run_command(cmd, env=os.environ.copy()) + class TpuConfigTester(unittest.TestCase): """ diff --git a/tests/test_multigpu.py b/tests/test_multigpu.py index a479130b74f..20a9e5a87cb 100644 --- a/tests/test_multigpu.py +++ b/tests/test_multigpu.py @@ -22,7 +22,6 @@ from accelerate import Accelerator from accelerate.big_modeling import dispatch_model from accelerate.test_utils import assert_exception, execute_subprocess_async, require_multi_gpu -from accelerate.test_utils.testing import run_command from accelerate.utils import 
patch_environment @@ -34,9 +33,6 @@ def setUp(self): mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"] ) self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) - self.notebook_launcher_path = os.path.sep.join( - mod_file.split(os.path.sep)[:-1] + ["scripts", "test_notebook.py"] - ) @require_multi_gpu def test_multi_gpu(self): @@ -70,17 +66,6 @@ def test_distributed_data_loop(self): with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"): execute_subprocess_async(cmd, env=os.environ.copy()) - @require_multi_gpu - def test_notebook_launcher(self): - """ - This test checks a variety of situations and scenarios - with the `notebook_launcher` - """ - cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.notebook_launcher_path] - print(f"Running {cmd}") - with patch_environment(omp_num_threads=1): - run_command(cmd, env=os.environ.copy()) - if __name__ == "__main__": accelerator = Accelerator()