From 82f0e1018e392fc933be464ba5eab5e78476cb0d Mon Sep 17 00:00:00 2001
From: gavinfd-hl
Date: Thu, 22 Aug 2024 17:22:25 +0100
Subject: [PATCH 1/3] Updating the project to use the v5 humanloop endpoints

---
 README.md            |   8 ++--
 app.py               | 101 ++++++++++++++++++++++++-------------------
 requirements.txt     |   6 +--
 templates/index.html |  41 +++++++-----------
 4 files changed, 79 insertions(+), 77 deletions(-)

diff --git a/README.md b/README.md
index 9646515..158461e 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # Humanloop API Quickstart - Python example app
 
-This is an example app that shows you how to use the Humanloop API in a GPT-3 app. It uses the [Flask](https://flask.palletsprojects.com/en/2.2.x/) web framework and [Humanloop](https://humanloop.com) for data logging and model improvement. Check out the tutorial or follow the instructions below to get set up.
+This is an example app that shows you how to use the Humanloop API in a GPT-4 app. It uses the [Flask](https://flask.palletsprojects.com/en/2.3.x/) web framework and [Humanloop](https://humanloop.com) for data logging and model improvement. Check out the tutorial or follow the instructions below to get set up.
 
 ## Setup
 
@@ -30,10 +30,10 @@ This is an example app that shows you how to use the Humanloop API in a GPT-3 ap
 6. Make a copy of the example environment variables file
    ```bash
-   $ cp .env.example .env
+   $ cp .example.env .env
    ```
-7. Add your [OpenAI API key](https://beta.openai.com/account/api-keys) and [Humanloop API key](https://app.humanloop.com/llama/settings) to the newly created `.env` file
+7. Add your [OpenAI API key](https://platform.openai.com/api-keys) and [Humanloop API key](https://app.humanloop.com/account/api-keys) to the newly created `.env` file
 8. Run the app
    ```bash
    $ flask --debug run
    ```
-You should now be able to access the app at [http://localhost:5000](http://localhost:5000)! For the full context behind this example app, check out the [tutorial](https://beta.openai.com/docs/quickstart).
+You should now be able to access the app at [http://localhost:5000](http://localhost:5000)! For the full context behind this example app, check out the [tutorial](https://platform.openai.com/docs/quickstart).
diff --git a/app.py b/app.py
index 3a0d0a8..8b621f7 100644
--- a/app.py
+++ b/app.py
@@ -1,16 +1,17 @@
 import os
 
-from humanloop import Humanloop
+from humanloop import Humanloop, ProviderApiKeysParams
 from flask import Flask, redirect, render_template, request, url_for
+from dotenv import load_dotenv
+
+load_dotenv()
 
 app = Flask(__name__)
 
 HUMANLOOP_API_KEY = os.getenv("HUMANLOOP_API_KEY")
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 
-humanloop = Humanloop(
-    api_key=HUMANLOOP_API_KEY,
-)
+hl = Humanloop(api_key=HUMANLOOP_API_KEY)
 
 
 @app.route("/", methods=["GET"])
@@ -18,45 +19,64 @@ def index():
     return render_template(
         "index.html",
         result=request.args.get("result"),
-        data_id=request.args.get("data_id"),
-        feedback=request.args.get("feedback"),
-        copied=request.args.get("copied", False),
+        log_id=request.args.get("log_id"),
+        evaluation=request.args.get("evaluation"),
     )
 
 
 @app.route("/get-question", methods=["POST"])
 def get_question():
-    # Make the request to GPT-3
+    # Make the request to GPT-4
     expert = request.form["Expert"]
     topic = request.form["Topic"]
 
-    # hl.complete automatically logs the data to your project.
-    complete_response = humanloop.complete_deployed(
-        project="learn-anything",
+    # hl.prompts.call automatically logs the data to your prompt.
+    call_response = hl.prompts.call(
+        path="learn-anything",
         inputs={"expert": expert, "topic": topic},
-        provider_api_keys={"openai": OPENAI_API_KEY},
+        prompt={
+            "template": [
+                {
+                    "role": "system",
+                    "content": "You are {{expert}}. Write a joke about {{topic}}.",
+                }
+            ],
+            "model": "gpt-4",
+        },
+        provider_api_keys=ProviderApiKeysParams(openai=OPENAI_API_KEY),
     )
 
-    data_id = complete_response.body["data"][0]["id"]
-    result = complete_response.body["data"][0]["output"]
-    print("data_id from completion: ", data_id)
-    return redirect(url_for("index", result=result, data_id=data_id))
+    # The log_id is the ID of the log on Humanloop that was created by the call
+    log_id = call_response.id
+    log_response = call_response.logs[0]
+
+    return redirect(url_for("index", result=log_response.output, log_id=log_id))
 
 
 @app.route("/actions/thumbs-up", methods=["POST"])
 def thumbs_up():
-    data_id = request.args.get("data_id")
+    log_id = request.args.get("log_id")
+
+    # We fetch the log from Humanloop to find which prompt it's associated with
+    log = hl.logs.get(id=log_id)
+    prompt_id = log.prompt.id
+
+    # We fetch the prompt to find which evaluator it's associated with
+    prompt = hl.prompts.get(id=prompt_id)
 
-    # Send rating feedback to Humanloop
-    humanloop.feedback(type="rating", value="good", data_id=data_id)
-    print(f"Recorded 👍 feedback to datapoint: {data_id}")
+    # Note this will be empty if you haven't assigned a monitoring evaluator to your prompt
+    evaluator_id = prompt.evaluators[0].version_reference.file.id
+
+    # Send rating evaluation to Humanloop using the evaluator_id and log_id
+    hl.evaluators.log(parent_id=log_id, id=evaluator_id, judgment="2")
+    print(f"Recorded 👍 evaluation to log: {log_id}")
 
     return redirect(
         url_for(
             "index",
             result=request.args.get("result"),
-            data_id=data_id,
-            feedback="👍",
+            log_id=log_id,
+            evaluation="👍",
             copied=request.args.get("copied", False),
         )
     )
@@ -64,37 +84,28 @@ def thumbs_up():
 
 @app.route("/actions/thumbs-down", methods=["POST"])
 def thumbs_down():
-    data_id = request.args.get("data_id")
-
-    # Send rating feedback to Humanloop
-    humanloop.feedback(type="rating", value="bad", data_id=data_id)
-    print(f"Recorded 👎 feedback to datapoint: {data_id}")
+    log_id = request.args.get("log_id")
 
-    return redirect(
-        url_for(
-            "index",
-            result=request.args.get("result"),
-            data_id=data_id,
-            feedback="👎",
-            copied=request.args.get("copied", False),
-        )
-    )
+    # We fetch the log from Humanloop to find which prompt it's associated with
+    log = hl.logs.get(id=log_id)
+    prompt_id = log.prompt.id
 
+    # We fetch the prompt to find which evaluator it's associated with
+    prompt = hl.prompts.get(id=prompt_id)
 
-@app.route("/actions/copy", methods=["POST"])
-def feedback():
-    data_id = request.args.get("data_id")
+    # Note this will be empty if you haven't assigned a monitoring evaluator to your prompt
+    evaluator_id = prompt.evaluators[0].version_reference.file.id
 
-    # Send implicit feedback to Humanloop
-    humanloop.feedback(type="action", value="copy", data_id=data_id)
-    print(f"Recorded implicit feedback that user copied to datapoint: {data_id}")
+    # Send rating evaluation to Humanloop using the evaluator_id and log_id
+    hl.evaluators.log(parent_id=log_id, id=evaluator_id, judgment="1")
+    print(f"Recorded 👎 evaluation to log: {log_id}")
 
     return redirect(
         url_for(
             "index",
             result=request.args.get("result"),
-            data_id=data_id,
-            feedback=request.args.get("feedback"),
-            copied=True,
+            log_id=log_id,
+            evaluation="👎",
+            copied=request.args.get("copied", False),
         )
     )
diff --git a/requirements.txt b/requirements.txt
index 11c2d2f..4febd94 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
 Flask==2.3.3
-humanloop==0.5.6
-openai==0.27.8
-python-dotenv==1.0.0
+humanloop==0.8.0b6
+openai==1.42.0
+python-dotenv==1.0.1
diff --git a/templates/index.html b/templates/index.html
index 355a540..d354386 100644
--- a/templates/index.html
+++ b/templates/index.html
@@ -6,56 +6,47 @@
 [The body of this hunk did not survive extraction: the template's HTML markup was stripped, leaving only bare +/- markers. The recoverable fragments are the learning.png icon link (href="{{ url_for('static', filename='learning.png') }}"), the "Learn anything from anyone" heading, and the {% if result %} ... {{ result }} result block; presumably the template's data_id/feedback parameters become log_id/evaluation to match app.py, but the markup itself cannot be reconstructed from this copy of the patch.]
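Taken together, the core change in this first patch is the move from `humanloop.complete_deployed()` plus explicit `humanloop.feedback()` calls to the v5 `hl.prompts.call()` flow. Pulled out of the Flask handler, the generate-and-log step looks roughly like this (a minimal sketch that reuses only the calls appearing in the diff, with the plain-dict `provider_api_keys` form the later patch in this series settles on; the example inputs are placeholders, and it assumes the `learn-anything` Prompt already exists in your Humanloop workspace and that both API keys are set in `.env`):

```python
import os

from dotenv import load_dotenv
from humanloop import Humanloop

load_dotenv()
hl = Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY"))

# hl.prompts.call generates a completion and logs it against the Prompt in one step.
call_response = hl.prompts.call(
    path="learn-anything",
    inputs={"expert": "a pirate", "topic": "Python"},  # placeholder inputs
    provider_api_keys={"openai": os.getenv("OPENAI_API_KEY")},
)

log_id = call_response.id              # ID of the Log created on Humanloop
result = call_response.logs[0].output  # the model's generation
print(log_id, result)
```

The Flask route in the diff does the same thing, then round-trips `log_id` through the redirect so the thumbs-up/thumbs-down handlers can attach feedback to the same log.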
From cc25c05479ba5e7ea82495982193047331e0992f Mon Sep 17 00:00:00 2001
From: gavinfd-hl
Date: Thu, 22 Aug 2024 17:24:10 +0100
Subject: [PATCH 2/3] Adding a comment

---
 app.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/app.py b/app.py
index 8b621f7..ceeb427 100644
--- a/app.py
+++ b/app.py
@@ -34,15 +34,16 @@ def get_question():
     call_response = hl.prompts.call(
         path="learn-anything",
         inputs={"expert": expert, "topic": topic},
-        prompt={
-            "template": [
-                {
-                    "role": "system",
-                    "content": "You are {{expert}}. Write a joke about {{topic}}.",
-                }
-            ],
-            "model": "gpt-4",
-        },
+        # If you haven't previously created a prompt, you can uncomment the prompt template below
+        # prompt={
+        #     "template": [
+        #         {
+        #             "role": "system",
+        #             "content": "You are {{expert}}. Write a joke about {{topic}}.",
+        #         }
+        #     ],
+        #     "model": "gpt-4",
+        # },
         provider_api_keys=ProviderApiKeysParams(openai=OPENAI_API_KEY),
     )
 

From bd69b40fe3aff8c3b17425f83dd60a0681d05545 Mon Sep 17 00:00:00 2001
From: gavinfd-hl
Date: Thu, 22 Aug 2024 17:31:00 +0100
Subject: [PATCH 3/3] Removing typedict

---
 app.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/app.py b/app.py
index ceeb427..7c9b465 100644
--- a/app.py
+++ b/app.py
@@ -1,6 +1,6 @@
 import os
 
-from humanloop import Humanloop, ProviderApiKeysParams
+from humanloop import Humanloop
 from flask import Flask, redirect, render_template, request, url_for
 from dotenv import load_dotenv
 
@@ -44,7 +44,7 @@ def get_question():
         #     ],
         #     "model": "gpt-4",
         # },
-        provider_api_keys=ProviderApiKeysParams(openai=OPENAI_API_KEY),
+        provider_api_keys={"openai": OPENAI_API_KEY},
     )
 
     # The log_id is the ID of the log on Humanloop that was created by the call
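The two later patches only adjust the arguments to `hl.prompts.call()`; the human-feedback loop introduced in the first patch is unchanged. Condensed out of the two Flask routes, that loop looks roughly like this (a sketch that reuses only calls appearing in the diff; it assumes a monitoring evaluator is already attached to the `learn-anything` Prompt, since `prompt.evaluators` is empty otherwise, and the judgment values `"2"`/`"1"` mirror the 👍/👎 handlers above):

```python
import os

from dotenv import load_dotenv
from humanloop import Humanloop

load_dotenv()
hl = Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY"))


def record_rating(log_id: str, thumbs_up: bool) -> None:
    """Attach a 👍/👎 judgment to an existing Humanloop log."""
    # Find the Prompt that the log belongs to.
    log = hl.logs.get(id=log_id)
    prompt = hl.prompts.get(id=log.prompt.id)

    # First monitoring evaluator attached to the Prompt (as in app.py).
    evaluator_id = prompt.evaluators[0].version_reference.file.id

    # Record the rating as an evaluator log against the original log.
    hl.evaluators.log(
        parent_id=log_id,
        id=evaluator_id,
        judgment="2" if thumbs_up else "1",
    )
```

In the app itself, `log_id` arrives via the redirect query string that `get_question` sets after calling `hl.prompts.call()`.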