
Commit 68c976a

Author: Lê Nam Khánh
docs: fix typos in some files (#4101)
This PR fixes typos in the files using codespell.
1 parent b68a25d commit 68c976a

File tree

3 files changed, +3 -3 lines changed


src/llama_stack/providers/inline/inference/meta_reference/inference.py

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ async def load_model(self, model_id, llama_model) -> None:
     def check_model(self, request) -> None:
         if self.model_id is None or self.llama_model is None:
             raise RuntimeError(
-                "No avaible model yet, please register your requested model or add your model in the resouces first"
+                "No available model yet, please register your requested model or add your model in the resources first"
             )
         elif request.model != self.model_id:
             raise RuntimeError(f"Model mismatch: request model: {request.model} != loaded model: {self.model_id}")

src/llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py

Lines changed: 1 addition & 1 deletion
@@ -91,7 +91,7 @@ def save_checkpoint(
         if checkpoint_format == "meta" or checkpoint_format is None:
             self._save_meta_format_checkpoint(model_file_path, state_dict, adapter_only)
         elif checkpoint_format == "huggingface":
-            # Note: for saving hugging face format checkpoints, we only suppport saving adapter weights now
+            # Note: for saving hugging face format checkpoints, we only support saving adapter weights now
             self._save_hf_format_checkpoint(model_file_path, state_dict)
         else:
             raise ValueError(f"Unsupported checkpoint format: {format}")
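The corrected comment sits inside a dispatch on checkpoint_format. Below is a hedged, self-contained sketch of that dispatch; the two _save_* helpers are stubs standing in for the checkpointer's real methods, and the printed output is illustrative only.

from typing import Any


def _save_meta_format_checkpoint(path: str, state_dict: dict, adapter_only: bool) -> None:
    # Stub: the real checkpointer writes Meta-format weights here.
    print(f"meta format: saving {'adapter' if adapter_only else 'full'} weights to {path}")


def _save_hf_format_checkpoint(path: str, state_dict: dict) -> None:
    # Stub: the real checkpointer writes Hugging Face-format adapter weights here.
    print(f"huggingface format: saving adapter weights to {path}")


def save_checkpoint(
    model_file_path: str,
    state_dict: dict[str, Any],
    adapter_only: bool,
    checkpoint_format: str | None,
) -> None:
    if checkpoint_format == "meta" or checkpoint_format is None:
        _save_meta_format_checkpoint(model_file_path, state_dict, adapter_only)
    elif checkpoint_format == "huggingface":
        # Hugging Face format currently covers adapter weights only.
        _save_hf_format_checkpoint(model_file_path, state_dict)
    else:
        raise ValueError(f"Unsupported checkpoint format: {checkpoint_format}")


save_checkpoint("out/adapter.pt", {}, adapter_only=True, checkpoint_format="huggingface")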

src/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ def llama_stack_instruct_to_torchtune_instruct(
     )
     input_messages = json.loads(sample[ColumnName.chat_completion_input.value])

-    assert len(input_messages) == 1, "llama stack intruct dataset format only supports 1 user message"
+    assert len(input_messages) == 1, "llama stack instruct dataset format only supports 1 user message"
     input_message = input_messages[0]

     assert "content" in input_message, "content not found in input message"

0 commit comments
