Skip to content

Commit 5eed4bb

Browse files
test: quivr server install and upload sample linkedin data
1 parent ab1a30c commit 5eed4bb

25 files changed

+11028
-12
lines changed

backend/core/.env.apitest

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
OPENAI_API_KEY="<REDACTED — a real OpenAI secret key was committed here; revoke it immediately and load the key from the environment or a secrets manager instead of committing it>"

backend/core/babbages_calculating_engine.txt

+3,153
Large diffs are not rendered by default.

backend/core/crawl/crawler.py

+8-1
Original file line numberDiff line numberDiff line change
@@ -42,9 +42,16 @@ def checkGithub(self):
4242
else:
4343
return False
4444

45+
def checkLinkedIn(self):
    """Return True when the crawl target URL points at linkedin.com."""
    return "linkedin.com" in self.url
50+
4551

4652
def slugify(text):
    """ASCII-fold *text* and collapse it into a lowercase, hyphen-separated slug."""
    # Decompose accented characters (NFKD) so the accent marks can be
    # dropped by the ascii/ignore round-trip, keeping the base letters.
    ascii_text = (
        unicodedata.normalize("NFKD", text)
        .encode("ascii", "ignore")
        .decode("utf-8")
    )
    # Strip everything that is not a word character, whitespace, or hyphen.
    cleaned = re.sub(r"[^\w\s-]", "", ascii_text).strip().lower()
    # Collapse runs of whitespace/hyphens into a single hyphen separator.
    return re.sub(r"[-\s]+", "-", cleaned)

backend/core/doc/api_doc.md

-3
Original file line numberDiff line numberDiff line change
@@ -22,9 +22,6 @@ Authorization: Bearer {api_key}
2222

2323
Replace `{api_key}` with the generated API key obtained from the frontend
2424

25-
You can find more information in the [Authentication](/docs/backend/api/getting_started) section of the documentation.
26-
27-
2825

2926
# How to use the API
3027

+19
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
## API authentication
2+
3+
## Personality
4+
- Generate questions
5+
- Assess personality from Test Results
6+
7+
## Brain
8+
- Create new brain
9+
- Get all brains
10+
11+
## Upload
12+
- File Upload
13+
14+
## Crawl
15+
- Website crawl
16+
17+
## Chat
18+
- Create new chat room
19+

backend/core/llm/qa_base.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ async def _acall_chain(self, chain, question, history):
169169
}
170170
)
171171

172-
async def generate_stream(self, question: str) -> AsyncIterable:
172+
async def generate_stream(self, question: str, memory=None) -> AsyncIterable:
173173
"""
174174
Generate a streaming answer to a given question by interacting with the language model.
175175
:param question: The question
@@ -196,7 +196,7 @@ async def generate_stream(self, question: str) -> AsyncIterable:
196196

197197
# The Chain that combines the question and answer
198198
qa = ConversationalRetrievalChain(
199-
retriever=self.vector_store.as_retriever(), combine_docs_chain=doc_chain, question_generator=standalone_question_generator)
199+
retriever=self.vector_store.as_retriever(), combine_docs_chain=doc_chain, question_generator=standalone_question_generator, memory=memory)
200200

201201
transformed_history = []
202202

backend/core/models/brain_entity.py

+1
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ class BrainEntity(BaseModel):
1515
openai_api_key: Optional[str]
1616
status: Optional[str]
1717
prompt_id: Optional[UUID]
18+
linkedin: Optional[str]
1819
extraversion: Optional[int]
1920
neuroticism: Optional[int]
2021
conscientiousness: Optional[int]

backend/core/models/databases/supabase/brains.py

+1
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ class CreateBrainProperties(BaseModel):
1818
max_tokens: Optional[int] = 256
1919
openai_api_key: Optional[str] = None
2020
prompt_id: Optional[UUID] = None
21+
linkedin: Optional[str] = None
2122
extraversion: Optional[int] = None
2223
neuroticism: Optional[int] = None
2324
conscientiousness: Optional[int] = None

backend/core/models/personality.py

+11
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
from typing import Any, List, Optional
2+
from pydantic import BaseModel
3+
4+
class TestResult(BaseModel):
    """One answered question from a personality (Big Five) test.

    Fields:
        trait:    trait the question measures — presumably one of the Big
                  Five names used elsewhere in this commit (extraversion,
                  neuroticism, conscientiousness, ...); TODO confirm.
        positive: whether the question is positively keyed for the trait.
        question: the question text shown to the user.
        answer:   the user's numeric answer (scale range not visible here).
    """

    trait: Optional[str] = "Extraversion"
    # BUG FIX: the original line read `positive: bool = True,` — the
    # trailing comma made the default the tuple (True,), not the bool True.
    positive: bool = True
    question: Optional[str] = ""
    answer: Optional[int] = 0
9+
10+
# class TestResultList(BaseModel):
11+
# results: List[TestResult] = []

0 commit comments

Comments
 (0)