# query_rag_application.py
import os

from dotenv import load_dotenv
from langchain_chroma import Chroma
from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEmbeddings

from main import store_pdf_data_in_vector_db

# Load environment variables (including the Groq API key) from a .env file.
load_dotenv()
# Chat model served by Groq; the API key is read from the environment.
llm = ChatGroq(
    groq_api_key=os.getenv("groq_api_key"),
    model_name="llama-3.3-70b-versatile",
)

# Embedding model used both at indexing time and at query time.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

# Persistent Chroma vector store holding the indexed PDF chunks.
vector_store = Chroma(
    embedding_function=embeddings,
    persist_directory="./chroma_langchain_db",
)
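
# Note: store_pdf_data_in_vector_db (defined in main.py, not shown here) is
# assumed to tag each chunk with {"source": "pdf"} metadata, since the
# similarity-search filter below relies on it. A minimal sketch of what that
# indexing step might look like (hypothetical, for illustration only):
#
#   from langchain_core.documents import Document
#   vector_store.add_documents(
#       [Document(page_content=chunk, metadata={"source": "pdf"})
#        for chunk in pdf_chunks]
#   )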

def index_data(relevant_path):
    """
    Index the data at the given full path into the vector database.

    Call this function with the relevant path before querying.
    """
    try:
        store_pdf_data_in_vector_db(relevant_path)
        print("Data indexed successfully.")
    except Exception as e:
        print(f"Some error occurred: {e}")
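
# Example usage (the path is illustrative; point it at a real PDF):
# index_data("./documents/example.pdf")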

def query_file_and_invoke_llm(question):
    # Retrieve the two chunks most similar to the question, restricted to
    # documents indexed with {"source": "pdf"} metadata.
    results = vector_store.similarity_search(
        question,
        k=2,
        filter={"source": "pdf"},
    )
    relevant_text = "\n".join(doc.page_content for doc in results)
    prompt = f"Given the content: {relevant_text}\nAnswer the query: {question}"
    response = llm.invoke(prompt)
    return response.content
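
# Example (illustrative question; assumes a PDF has already been indexed):
# print(query_file_and_invoke_llm("What is this document about?"))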

if __name__ == "__main__":
    # Simple interactive loop for manual testing.
    while True:
        res = query_file_and_invoke_llm(input("Enter your query: "))
        print(res, "-" * 60)