Evaluation function based on LLM grading #7

Open · wants to merge 4 commits into base: main
6 changes: 6 additions & 0 deletions examples/readme_examples.py
@@ -17,6 +17,12 @@
    # Prompting "hello there" and making sure there's "hi" or "hello"
    # somewhere in the answer
    PromptCase("hello there!", lambda x: evals.any_word(x.response, ["hi", "hello"])),
    # Prompting "2+2" and making sure the answer is mathematically correct
    PromptCase(
        "What is 2+2?",
        # the expected answer can be numeric or textual; the grading LLM understands both
        lambda x: evals.is_correct(x.response, question=x.prompt, expected="four"),
    ),
    # Making sure 3 specific guitar players are in the top 10
    # the score here is a percentage of the words found
    PromptCase(
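For intuition, here is how the new case evaluates once a suite run has filled in the response. This is a minimal sketch: the SimpleNamespace stand-in for a completed PromptCase (exposing .prompt and .response, as the lambdas in the diff assume) and the "from promptimize import evals" import path are assumptions for illustration, and a real run also needs OPENAI_API_KEY set, since is_correct calls out to a grading model.

from types import SimpleNamespace

from promptimize import evals  # assumed import path for the evals module

# Hypothetical completed case: the eval lambdas read .prompt and .response
case = SimpleNamespace(prompt="What is 2+2?", response="The answer is 4")

score = evals.is_correct(case.response, question=case.prompt, expected="four")
print(score)  # 1 if the grading model marks the answer CORRECT, else 0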
58 changes: 57 additions & 1 deletion promptimize/evals.py
@@ -8,7 +8,11 @@
success, and a range in-between
"""

import os
from typing import List, Optional

from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate


def percentage_of_words(response: str, words: List[str], case_sensitive: bool = False) -> float:
@@ -120,3 +124,55 @@ def all(iteratable):

def any(iteratable):
    return 1 if base_any([i == 1 for i in iteratable]) else 0


def is_correct(
    response: str, question: str, expected: str, model_name: Optional[str] = None
) -> int:
    """
    Query an LLM to grade the given response against the expected answer.

    Args:
        response (str): The answer given by the LLM under test.
        question (str): The question that was asked.
        expected (str): The expected answer.
        model_name (Optional[str]): The grading model to use; defaults to "gpt-4".

    Returns:
        int: 1 if the LLM grades the response as CORRECT against the expected
            answer; otherwise, 0.

    Examples:
        >>> is_correct("5", "What is the result?", "7")
        0
        >>> is_correct("5", "What is the result?", "5.0")
        1
        >>> is_correct("a dog", "Name a pet", "a cat")
        0
    """
    model_name = model_name or "gpt-4"  # GPT-4 works great for evaluating correctness
    llm = ChatOpenAI(model_name=model_name, openai_api_key=os.environ.get("OPENAI_API_KEY"))
    prompt = PromptTemplate(
        input_variables=["response", "expected", "question"],
        template=IS_CORRECT_TEMPLATE,
    ).format(response=response, expected=expected, question=question)

    grade = llm.predict(prompt)  # the grader replies with CORRECT or INCORRECT

    return 0 if "INCORRECT" in grade else 1


IS_CORRECT_TEMPLATE = """
You are a teacher grading an answer.
You are given an expected answer and the actual answer. You are asked to score the answer as either CORRECT or INCORRECT, based on the context.

Example Format:
QUESTION: question here
EXPECTED ANSWER: expected answer here
ANSWER: actual answer here
GRADE: CORRECT or INCORRECT here

Grade the answers based ONLY on whether they match the expected answers, regardless of their factual accuracy. Ignore differences in punctuation and phrasing between the answer and the expected answer. It is OK if the answer contains more information than the expected answer, as long as it does not contain any conflicting statements. Begin!

QUESTION: {question}
EXPECTED ANSWER: {expected}
ANSWER: {response}
GRADE: """
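To see exactly what the grading model receives, the prompt can be rendered without making any API call. This sketch assumes IS_CORRECT_TEMPLATE is importable from promptimize.evals as defined in the diff above; only langchain is required to run it.

from langchain.prompts import PromptTemplate

from promptimize.evals import IS_CORRECT_TEMPLATE  # assumption: module-level constant as in the diff

# Render the grading prompt exactly as is_correct() does, with no API call.
prompt = PromptTemplate(
    input_variables=["response", "expected", "question"],
    template=IS_CORRECT_TEMPLATE,
).format(response="The answer is 4", expected="four", question="What is 2+2?")

print(prompt)  # ends with "GRADE: ", leaving the verdict for the grader to complete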