Improve LiteLLM model detection method
davidmezzetti committed Dec 26, 2023
1 parent b907a5f · commit b44d577
Showing 2 changed files with 11 additions and 9 deletions.
2 changes: 1 addition & 1 deletion setup.py
@@ -49,7 +49,7 @@

extras["pipeline-image"] = ["imagehash>=4.2.1", "pillow>=7.1.2", "timm>=0.4.12"]

extras["pipeline-llm"] = ["litellm>=1.12.0", "llama-cpp-python>=0.2.20"]
extras["pipeline-llm"] = ["litellm>=1.15.8", "llama-cpp-python>=0.2.20"]

extras["pipeline-text"] = ["fasttext>=0.9.2", "sentencepiece>=0.1.91"]

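For context, a quick way to confirm an environment already meets the bumped litellm floor is sketched below (a minimal sketch, not part of this commit; assumes Python 3.8+ for `importlib.metadata` and a plain `X.Y.Z` version string):

```python
from importlib.metadata import version

# Parse the installed litellm version and compare against the new 1.15.8 floor;
# naive comparison that assumes a plain numeric "X.Y.Z" version string
installed = tuple(int(part) for part in version("litellm").split(".")[:3])
print("litellm OK" if installed >= (1, 15, 8) else "litellm older than 1.15.8")
```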
18 changes: 10 additions & 8 deletions src/python/txtai/pipeline/llm/litellm.py
@@ -2,9 +2,6 @@
LiteLLM module
"""

-import os
-import contextlib
-
# Conditional import
try:
    import litellm as api
@@ -35,11 +32,16 @@ def ismodel(path):

        # pylint: disable=W0702
        if isinstance(path, str) and LITELLM:
-            with open(os.devnull, "w", encoding="utf-8") as f, contextlib.redirect_stdout(f):
-                try:
-                    return api.get_llm_provider(path)
-                except:
-                    return False
+            debug = api.suppress_debug_info
+            try:
+                # Suppress debug messages for this test
+                api.suppress_debug_info = True
+                return api.get_llm_provider(path)
+            except:
+                return False
+            finally:
+                # Restore debug info value to original value
+                api.suppress_debug_info = debug

        return False

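Toggling `suppress_debug_info` and restoring it in a `finally` block silences litellm's debug output only for the duration of the check, instead of redirecting process-wide stdout as the previous `contextlib.redirect_stdout` approach did. A minimal sketch of exercising the updated detection (assuming `txtai[pipeline-llm]` is installed; the model strings are illustrative):

```python
from txtai.pipeline import LiteLLM

# ismodel() returns the provider resolution for strings litellm recognizes
# and False for anything get_llm_provider() cannot resolve
print(bool(LiteLLM.ismodel("gpt-3.5-turbo")))        # True: resolves to a known provider
print(bool(LiteLLM.ismodel("/path/to/model.gguf")))  # False: not a LiteLLM model string
```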
