
Fix some code annotation typos. #37102

Merged · 1 commit · Apr 2, 2025
2 changes: 1 addition & 1 deletion src/transformers/configuration_utils.py
@@ -1184,7 +1184,7 @@ def recursive_diff_dict(dict_a, dict_b, config_obj=None):
Helper function to recursively take the diff between two nested dictionaries. The resulting diff only contains the
values from `dict_a` that are different from values in `dict_b`.

- dict_b : the default config dictionnary. We want to remove values that are in this one
+ dict_b : the default config dictionary. We want to remove values that are in this one
"""
diff = {}
default = config_obj.__class__().to_dict() if config_obj is not None else {}
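For context, the docstring above describes a recursive diff over nested config dictionaries: keep only the values in `dict_a` that differ from the defaults in `dict_b`. A minimal sketch of that idea (not the library's exact implementation, which also consults `config_obj` for class defaults) could look like this:

```python
def recursive_diff(dict_a, dict_b):
    """Keep entries of dict_a whose values differ from dict_b, recursing into nested dicts."""
    diff = {}
    for key, value in dict_a.items():
        default = dict_b.get(key)
        if isinstance(value, dict) and isinstance(default, dict):
            nested = recursive_diff(value, default)
            if nested:  # only keep nested dicts that actually differ
                diff[key] = nested
        elif value != default:
            diff[key] = value
    return diff

custom = {"hidden_size": 1024, "attention": {"dropout": 0.1, "heads": 16}}
defaults = {"hidden_size": 768, "attention": {"dropout": 0.1, "heads": 16}}
print(recursive_diff(custom, defaults))  # {'hidden_size': 1024}
```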
4 changes: 2 additions & 2 deletions src/transformers/modeling_gguf_pytorch_utils.py
@@ -290,7 +290,7 @@ def get_gguf_hf_weights_map(
# hack: ggufs have a different name for cohere
if model_type == "cohere":
model_type = "command-r"
if model_type == "qwen2_moe":
elif model_type == "qwen2_moe":
model_type = "qwen2moe"
arch = None
for key, value in MODEL_ARCH_NAMES.items():
@@ -346,7 +346,7 @@ def load_gguf_checkpoint(gguf_checkpoint_path, return_tensors=False, model_to_lo
Args:
gguf_checkpoint_path (`str`):
The path the to GGUF file to load
- return_tensors (`bool`, defaults to `True`):
+ return_tensors (`bool`, defaults to `False`):
Whether to read the tensors from the file and return them. Not doing so is faster
and only loads the metadata in memory.
"""
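The corrected default is worth flagging to callers: with `return_tensors=False`, `load_gguf_checkpoint` only parses the GGUF metadata, which is much cheaper than materializing the weights. A hedged usage sketch, assuming the signature shown in the hunk above and a hypothetical local file path:

```python
from transformers.modeling_gguf_pytorch_utils import load_gguf_checkpoint

gguf_path = "checkpoints/model.gguf"  # hypothetical path to a local GGUF file

# Default behaviour (return_tensors=False): only the metadata is read, fast and light on memory.
parsed = load_gguf_checkpoint(gguf_path)
print(list(parsed))  # metadata sections parsed from the file

# Passing return_tensors=True also reads the tensors; per the truncated signature above it may
# additionally expect the target model via `model_to_lo...`, so that path is left out of this sketch.
```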
8 changes: 4 additions & 4 deletions src/transformers/models/bark/processing_bark.py
@@ -78,7 +78,7 @@ def from_pretrained(
- a path to a *directory* containing a processor saved using the [`~BarkProcessor.save_pretrained`]
method, e.g., `./my_model_directory/`.
speaker_embeddings_dict_path (`str`, *optional*, defaults to `"speaker_embeddings_path.json"`):
- The name of the `.json` file containing the speaker_embeddings dictionnary located in
+ The name of the `.json` file containing the speaker_embeddings dictionary located in
`pretrained_model_name_or_path`. If `None`, no speaker_embeddings is loaded.
**kwargs
Additional keyword arguments passed along to both
@@ -105,7 +105,7 @@ def from_pretrained(
logger.warning(
f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
- dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
+ dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
)
speaker_embeddings = None
else:
@@ -135,7 +135,7 @@ def save_pretrained(
Directory where the tokenizer files and the speaker embeddings will be saved (directory will be created
if it does not exist).
speaker_embeddings_dict_path (`str`, *optional*, defaults to `"speaker_embeddings_path.json"`):
- The name of the `.json` file that will contains the speaker_embeddings nested path dictionnary, if it
+ The name of the `.json` file that will contains the speaker_embeddings nested path dictionary, if it
exists, and that will be located in `pretrained_model_name_or_path/speaker_embeddings_directory`.
speaker_embeddings_directory (`str`, *optional*, defaults to `"speaker_embeddings/"`):
The name of the folder in which the speaker_embeddings arrays will be saved.
@@ -246,7 +246,7 @@ def __call__(
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
voice_preset (`str`, `Dict[np.ndarray]`):
The voice preset, i.e the speaker embeddings. It can either be a valid voice_preset name, e.g
`"en_speaker_1"`, or directly a dictionnary of `np.ndarray` embeddings for each submodel of `Bark`. Or
`"en_speaker_1"`, or directly a dictionary of `np.ndarray` embeddings for each submodel of `Bark`. Or
it can be a valid file name of a local `.npz` single voice preset.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
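Since several of the fixes touch the `voice_preset` documentation, a short usage sketch may help readers; the checkpoint name and preset string below are assumptions, not part of this diff:

```python
from transformers import BarkProcessor

# Assumed example checkpoint; any Bark processor repo with bundled speaker embeddings works similarly.
processor = BarkProcessor.from_pretrained("suno/bark-small")

# voice_preset can be a named preset string, as in the docstring above ...
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_1")

# ... or a dictionary of np.ndarray embeddings (one per Bark submodel), or a local .npz file path.
```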
2 changes: 1 addition & 1 deletion src/transformers/utils/hub.py
@@ -1150,7 +1150,7 @@ def create_and_tag_model_card(
The list of tags to add in the model card
token (`str`, *optional*):
Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to the stored token.
- ignore_metadata_errors (`str`):
+ ignore_metadata_errors (`bool`, *optional*, defaults to `False`):
If True, errors while parsing the metadata section will be ignored. Some information might be lost during
the process. Use it at your own risk.
"""
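For completeness, the corrected `ignore_metadata_errors` annotation corresponds to a boolean flag at the call site. A minimal, hedged sketch assuming the signature shown above; the repo id and tags are placeholders, and pushing the card back requires write access:

```python
from transformers.utils.hub import create_and_tag_model_card

card = create_and_tag_model_card(
    repo_id="my-username/my-finetuned-model",  # placeholder repo id
    tags=["text-generation", "fine-tuned"],
    ignore_metadata_errors=True,  # tolerate malformed card metadata; some information may be lost
)
card.push_to_hub("my-username/my-finetuned-model")
```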