We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 219183f · commit 2b9ef0a (Copy full SHA for 2b9ef0a)
autoparallel/asynctp_ops.py
@@ -16,8 +16,8 @@
16
import torch.distributed._functional_collectives as funcol
17
import torch.distributed.distributed_c10d as c10d
18
from torch._C._autograd import DeviceType
19
-from torch.distributed._distributed_c10d import Work as _Work
20
-from torch.distributed._distributed_c10d import _register_work, _SymmetricMemory
+from torch._C._distributed_c10d import Work as _Work
+from torch._C._distributed_c10d import _register_work, _SymmetricMemory
21
from torch.distributed._symmetric_memory import get_symm_mem_workspace, rendezvous
22
23
_is_test_mode: bool = False
0 commit comments