@@ -1,7 +1,6 @@
 import os
 from typing import Any, Dict, List, Optional, Sequence

-from llama_index.core import get_response_synthesizer
 from llama_index.core.base.base_query_engine import BaseQueryEngine
 from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
 from llama_index.core.multi_modal_llms import MultiModalLLM
@@ -42,11 +41,10 @@ def create_query_engine(index, **kwargs) -> BaseQueryEngine:
     # If index is a LlamaCloudIndex,
     # use auto_routed mode for better query results
     if index.__class__.__name__ == "LlamaCloudIndex":
-        retrieval_mode = kwargs.get("retrieval_mode")
-        if retrieval_mode is None:
+        if kwargs.get("retrieval_mode") is None:
             kwargs["retrieval_mode"] = "auto_routed"
-        if multimodal_llm:
-            kwargs["retrieve_image_nodes"] = True
+            if multimodal_llm:
+                kwargs["retrieve_image_nodes"] = True
     return index.as_query_engine(**kwargs)


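With this change, a LlamaCloudIndex only falls back to the "auto_routed" retrieval mode when the caller has not chosen one, and image nodes are only requested in that defaulted, multimodal case. A minimal sketch of the resulting behavior (the caller code is hypothetical; `index` and `multimodal_llm` are assumed to be set up as in the module above):

    # No retrieval_mode passed: defaults to "auto_routed", and if a
    # multimodal LLM is configured, image nodes are retrieved as well.
    engine = create_query_engine(index)

    # An explicitly chosen mode is respected; neither default is applied.
    engine = create_query_engine(index, retrieval_mode="chunks")
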
@@ -86,14 +84,14 @@ class MultiModalSynthesizer(BaseSynthesizer):
     def __init__(
         self,
         multimodal_model: MultiModalLLM,
-        response_synthesizer: Optional[BaseSynthesizer] = None,
+        response_synthesizer: Optional[BaseSynthesizer],
         text_qa_template: Optional[BasePromptTemplate] = None,
         *args,
         **kwargs,
     ):
         super().__init__(*args, **kwargs)
         self._multi_modal_llm = multimodal_model
-        self._response_synthesizer = response_synthesizer or get_response_synthesizer()
+        self._response_synthesizer = response_synthesizer
         self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT_SEL

     def _get_prompts(self, **kwargs) -> Dict[str, Any]:
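MultiModalSynthesizer no longer falls back to get_response_synthesizer() internally (hence the import removal in the first hunk); the caller must now supply the text response synthesizer explicitly. A minimal construction sketch, with an OpenAI multimodal model assumed purely for illustration:

    from llama_index.core import get_response_synthesizer
    from llama_index.multi_modal_llms.openai import OpenAIMultiModal

    synthesizer = MultiModalSynthesizer(
        multimodal_model=OpenAIMultiModal(model="gpt-4o"),
        # Previously defaulted via get_response_synthesizer(); now explicit.
        response_synthesizer=get_response_synthesizer(),
    )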