Update dataclasses.py (#2292)
stas00 authored Dec 28, 2023
1 parent 848ed80 commit 403c071
Showing 1 changed file with 2 additions and 2 deletions.
src/accelerate/utils/dataclasses.py (4 changes: 2 additions & 2 deletions)
@@ -930,10 +930,10 @@ class FullyShardedDataParallelPlugin:
     use_orig_params: bool = field(
         default=True,
         metadata={
-            "help": "If `True`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres. "
+            "help": "If `True`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. "
             "Useful in cases such as parameter-efficient fine-tuning. "
             "Please refer this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). "
-            "This also enables to have different optimizer param groups. This should be `True` when creating optimizer object before preparing/wrapping the model with FSDP."
+            "This also enables multiple optimizer param groups. This should be `True` when creating an optimizer object before preparing/wrapping the model with FSDP."
         },
     )
     param_init_fn: Optional[Callable[[torch.nn.Module], None]] = field(
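
For context, here is a minimal sketch (not part of this commit) of the situation the corrected help text describes: with `use_orig_params=True`, the optimizer can be built from the original, unwrapped parameters, with frozen parameters and multiple param groups, before `accelerator.prepare()` wraps the model with FSDP. It assumes an FSDP launch configuration (e.g. via `accelerate launch`); the toy model, frozen layer, and learning rates are illustrative assumptions.

# Minimal sketch, assuming an FSDP-enabled Accelerate launch configuration.
# The toy model, the choice of frozen layer, and the learning rates are
# illustrative assumptions, not part of the commit.
import torch
from accelerate import Accelerator
from accelerate.utils import FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin(use_orig_params=True)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)

model = torch.nn.Sequential(torch.nn.Linear(512, 512), torch.nn.Linear(512, 512))

# Non-uniform `requires_grad`: the first layer stays frozen, the second is trained.
for p in model[0].parameters():
    p.requires_grad = False

# Different optimizer param groups, built from the original (unwrapped) parameters
# *before* the model is wrapped with FSDP.
optimizer = torch.optim.AdamW(
    [
        {"params": model[1].weight, "lr": 1e-4},
        {"params": model[1].bias, "lr": 1e-3},
    ]
)

# With use_orig_params=True the pre-built optimizer and its param groups remain
# valid after prepare() wraps the model with FSDP.
model, optimizer = accelerator.prepare(model, optimizer)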
