
Commit 1e4e135

Resolve messages formatting issues (#13095)
Signed-off-by: Emmanuel Ferdman <[email protected]>
1 parent 35b49e4 commit 1e4e135

6 files changed: +8 −8 lines changed
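
All six changes fix the same Python pitfall: when a message is split across adjacent string literals, the `f` prefix applies only to the fragment it is written on, so placeholders in an unprefixed continuation fragment are emitted literally instead of being interpolated. A minimal sketch of the bug and the fix (the variable values are illustrative, not taken from the patched files):

    # Adjacent string literals are concatenated at compile time, but the
    # f prefix does NOT carry over from one fragment to the next.
    num_gpus = 1
    gpus = "0,1"

    broken = (f"Larger --num-gpus "
              "({num_gpus}) than --gpus {gpus}!")  # plain str: braces kept verbatim
    print(broken)  # Larger --num-gpus ({num_gpus}) than --gpus {gpus}!

    fixed = (f"Larger --num-gpus "
             f"({num_gpus}) than --gpus {gpus}!")  # every fragment needs its own f
    print(fixed)   # Larger --num-gpus (1) than --gpus 0,1!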

python/llm/src/ipex_llm/serving/fastchat/model_worker.py

Lines changed: 1 addition & 1 deletion

@@ -470,7 +470,7 @@ async def api_model_details(request: Request):
 
     if args.gpus:
         invalidInputError(len(args.gpus.split(",")) > args.num_gpus, f"Larger --num-gpus "
-                          "({args.num_gpus}) than --gpus {args.gpus}!")
+                          f"({args.num_gpus}) than --gpus {args.gpus}!")
         os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
 
     gptq_config = GptqConfig(

python/llm/src/ipex_llm/transformers/model.py

Lines changed: 1 addition & 1 deletion

@@ -672,7 +672,7 @@ def load_low_bit(cls,
         else:
             invalidInputError(False,
                               f'`torch_dtype` can be either `torch.dtype` or `"auto"`,'
-                              'but received {torch_dtype}')
+                              f'but received {torch_dtype}')
         dtype_orig = model_class._set_default_torch_dtype(torch_dtype)
 
         # Pretrained Model

python/llm/src/ipex_llm/transformers/npu_model.py

Lines changed: 3 additions & 3 deletions

@@ -217,7 +217,7 @@ def from_pretrained(cls, *args, **kwargs):
                 max_prompt_len < max_context_len,
                 (
                     f"max_prompt_len ({max_prompt_len}) should be less"
-                    " than max_context_len ({max_context_len})"
+                    f" than max_context_len ({max_context_len})"
                 ),
             )
             optimize_kwargs = {

@@ -553,7 +553,7 @@ def load_low_bit(cls, pretrained_model_name_or_path: str, *model_args, **kwargs)
             invalidInputError(
                 False,
                 f'`torch_dtype` can be either `torch.dtype` or `"auto"`,'
-                "but received {torch_dtype}",
+                f"but received {torch_dtype}",
             )
             dtype_orig = model_class._set_default_torch_dtype(torch_dtype)
 
@@ -588,7 +588,7 @@ def load_low_bit(cls, pretrained_model_name_or_path: str, *model_args, **kwargs)
                 max_prompt_len < max_context_len,
                 (
                     f"max_prompt_len ({max_prompt_len}) should be less"
-                    " than max_context_len ({max_context_len})"
+                    f" than max_context_len ({max_context_len})"
                 ),
             )
             from ipex_llm.transformers.npu_models.convert_mp import optimize_llm_pre

python/llm/src/ipex_llm/transformers/npu_models/phi3.py

Lines changed: 1 addition & 1 deletion

@@ -127,7 +127,7 @@ def phi3_attention_forward(
             invalidInputError(
                 False,
                 f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)},"
-                " but is {attention_mask.size()}"
+                f" but is {attention_mask.size()}"
             )
         attn_weights = attn_weights + attention_mask
 
python/llm/src/ipex_llm/transformers/utils.py

Lines changed: 1 addition & 1 deletion

@@ -92,7 +92,7 @@ def load_state_dict(checkpoint_file: Union[str, os.PathLike]):
     except Exception as e:
         invalidInputError(False,
                           f"Unable to load weights"
-                          "from pytorch checkpoint file for '{checkpoint_file}' "
+                          f"from pytorch checkpoint file for '{checkpoint_file}' "
                           f"at '{checkpoint_file}'. ")
 

python/llm/src/ipex_llm/utils/lazy_load_torch.py

Lines changed: 1 addition & 1 deletion

@@ -112,7 +112,7 @@ def load(offset: int, elm_count: int):
             data = fp.read(size)
             return torch.frombuffer(bytearray(data), dtype=dtype)
         description = f'storage data_type={data_type} ' \
-                      'path-in-zip={filename} path={self.zip_file.filename}'
+                      f'path-in-zip={filename} path={self.zip_file.filename}'
         return LazyStorage(load=load, kind=pid[1], description=description)
 
     @staticmethod
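
All of the fixes above only add the missing `f` prefix; none change the message text. To keep such bugs from reappearing, a cheap runtime guard is to assert that a rendered message contains no leftover {placeholder} tokens. A minimal sketch, assuming nothing about ipex-llm internals (assert_fully_formatted is a hypothetical helper, not part of the library):

    import re

    def assert_fully_formatted(message: str) -> str:
        """Raise if a rendered message still contains an unexpanded {name} token."""
        leftover = re.search(r"\{[A-Za-z_][\w.()\[\]]*\}", message)
        if leftover:
            raise ValueError(f"unformatted placeholder {leftover.group()!r} in: {message}")
        return message

    torch_dtype = "float16"
    assert_fully_formatted(f'`torch_dtype` can be either `torch.dtype` or `"auto"`, '
                           f'but received {torch_dtype}')  # passes: no braces remain

Static analysis can catch the pattern too: ruff's preview rule RUF027 (missing-f-string-syntax) is intended to flag literals that contain {name} placeholders referencing in-scope variables but lack the `f` prefix, though rule availability depends on the ruff version in use.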
