Skip to content
Merged
Show file tree
Hide file tree
Changes from 15 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
"""create assistant table

Revision ID: 8757b005d681
Revises: 8e7dc5eab0b0
Create Date: 2025-06-16 13:40:10.447538

"""
from alembic import op
import sqlalchemy as sa
import sqlmodel.sql.sqltypes


# revision identifiers, used by Alembic.
revision = "8757b005d681"
down_revision = "8e7dc5eab0b0"
branch_labels = None
depends_on = None


def upgrade():
    """Create the ``openai_assistant`` table.

    Stores per-organization/per-project OpenAI assistant configuration.
    Rows are removed automatically when the parent organization or project
    is deleted (ON DELETE CASCADE on both foreign keys).
    """
    op.create_table(
        "openai_assistant",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("assistant_id", sa.VARCHAR(length=255), nullable=False),
        sa.Column("name", sa.VARCHAR(length=255), nullable=False),
        # Instantiated type (``sa.Integer()``) for consistency with every
        # other column in this table; the original mixed class and instance.
        sa.Column("max_num_results", sa.Integer(), nullable=False),
        sa.Column("model", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column("instructions", sa.Text(), nullable=False),
        sa.Column("temperature", sa.Float(), nullable=False),
        sa.Column("vector_store_id", sa.VARCHAR(length=255), nullable=False),
        sa.Column("organization_id", sa.Integer(), nullable=False),
        sa.Column("project_id", sa.Integer(), nullable=False),
        sa.Column("inserted_at", sa.DateTime(), nullable=False),
        sa.Column("updated_at", sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(
            ["organization_id"], ["organization.id"], ondelete="CASCADE"
        ),
        sa.ForeignKeyConstraint(["project_id"], ["project.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )


def downgrade():
    """Drop the ``openai_assistant`` table, reversing :func:`upgrade`."""
    op.drop_table("openai_assistant")
187 changes: 178 additions & 9 deletions backend/app/api/routes/responses.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,20 @@
from typing import Optional
from typing import Optional, Dict, Any
import logging

import openai
from pydantic import BaseModel
from fastapi import APIRouter, Depends
from pydantic import BaseModel, Extra
from fastapi import APIRouter, Depends, BackgroundTasks, HTTPException
from openai import OpenAI
from sqlmodel import Session

from app.api.deps import get_current_user_org, get_db
from app.crud.credentials import get_provider_credential
from app.crud.assistants import get_assistant_by_id
from app.models import UserOrganization
from app.utils import APIResponse

logger = logging.getLogger(__name__)

router = APIRouter(tags=["responses"])


Expand All @@ -23,22 +27,32 @@

class ResponsesAPIRequest(BaseModel):
    """Payload for the asynchronous ``/responses`` endpoint.

    Unknown fields are accepted and carried through untouched; the
    endpoint echoes them back in the callback payload.
    """

    project_id: int
    assistant_id: str
    question: str
    callback_url: Optional[str] = None
    response_id: Optional[str] = None

    class Config:
        # Permit arbitrary passthrough fields on the request.
        extra = Extra.allow


class ResponsesSyncAPIRequest(BaseModel):
    """Payload for the synchronous ``/responses/sync`` benchmarking endpoint."""

    project_id: int
    model: str
    instructions: str
    vector_store_ids: list[str]
    max_num_results: Optional[int] = 20
    temperature: Optional[float] = 0.1
    response_id: Optional[str] = None
    question: str


class Diagnostics(BaseModel):
    """Token-usage counters and the model name reported with a response."""

    input_tokens: int
    output_tokens: int
    total_tokens: int
    model: str


Expand All @@ -49,13 +63,16 @@

class _APIResponse(BaseModel):
    """Inner success payload wrapped by :class:`ResponsesAPIResponse`."""

    status: str
    response_id: str
    message: str
    chunks: list[FileResultChunk]
    diagnostics: Optional[Diagnostics] = None

    class Config:
        # Echo back any passthrough fields that arrived on the request.
        extra = Extra.allow


class ResponsesAPIResponse(APIResponse[_APIResponse]):
    """Standard API envelope specialized to the responses payload."""
Expand All @@ -73,14 +90,166 @@
return results


def get_additional_data(request: dict) -> dict:
    """Return the request fields that are not part of the core schema.

    The well-known keys the endpoint handles explicitly are filtered out;
    everything else is treated as passthrough data to be echoed back.
    """
    reserved = frozenset(
        {"project_id", "assistant_id", "callback_url", "response_id", "question"}
    )
    return {key: value for key, value in request.items() if key not in reserved}


def process_response(
    request: ResponsesAPIRequest, client: OpenAI, assistant, organization_id: int
):
    """Generate an OpenAI response for *request* and deliver it via callback.

    Runs as a background task: calls the OpenAI Responses API with the
    assistant's stored configuration, builds a success or failure envelope,
    and POSTs it to ``request.callback_url`` when one was supplied.

    Args:
        request: Validated request payload; extra fields are passed through.
        client: OpenAI client already configured with the org's API key.
        assistant: Assistant record holding model/instructions/vector-store
            configuration.
        organization_id: Owning organization id, used for log correlation.
    """
    logger.info(
        f"Starting generating response for assistant_id={request.assistant_id}, project_id={request.project_id}, organization_id={organization_id}"
    )
    try:
        response = client.responses.create(
            model=assistant.model,
            previous_response_id=request.response_id,
            instructions=assistant.instructions,
            tools=[
                {
                    "type": "file_search",
                    "vector_store_ids": [assistant.vector_store_id],
                    "max_num_results": assistant.max_num_results,
                }
            ],
            temperature=assistant.temperature,
            input=[{"role": "user", "content": request.question}],
            include=["file_search_call.results"],
        )
        response_chunks = get_file_search_results(response)
        logger.info(
            f"Successfully generated response: response_id={response.id}, assistant={request.assistant_id}, project_id={request.project_id}, organization_id={organization_id}"
        )

        # Reuse the shared helper instead of duplicating the reserved-key
        # filtering inline, so the passthrough semantics live in one place.
        callback_response = ResponsesAPIResponse.success_response(
            data=_APIResponse(
                status="success",
                response_id=response.id,
                message=response.output_text,
                chunks=response_chunks,
                diagnostics=Diagnostics(
                    input_tokens=response.usage.input_tokens,
                    output_tokens=response.usage.output_tokens,
                    total_tokens=response.usage.total_tokens,
                    model=response.model,
                ),
                **get_additional_data(request.model_dump()),
            ),
        )
    except openai.OpenAIError as e:
        error_message = handle_openai_error(e)
        logger.error(
            f"OpenAI API error during response processing: {error_message}, project_id={request.project_id}, organization_id={organization_id}"
        )
        callback_response = ResponsesAPIResponse.failure_response(error=error_message)

    if request.callback_url:
        logger.info(
            f"Sending callback to URL: {request.callback_url}, assistant={request.assistant_id}, project_id={request.project_id}, organization_id={organization_id}"
        )
        # Imported lazily to avoid a circular import with the threads router.
        from app.api.routes.threads import send_callback

        send_callback(request.callback_url, callback_response.model_dump())
        logger.info(
            f"Callback sent successfully, assistant={request.assistant_id}, project_id={request.project_id}, organization_id={organization_id}"
        )


@router.post("/responses", response_model=dict)
async def responses(
    request: ResponsesAPIRequest,
    background_tasks: BackgroundTasks,
    _session: Session = Depends(get_db),
    _current_user: UserOrganization = Depends(get_current_user_org),
):
    """Kick off response generation in the background and return immediately.

    Resolves the assistant and the organization's OpenAI credentials, then
    schedules :func:`process_response` as a background task. The final
    result is delivered to ``request.callback_url`` when one is provided.

    Raises:
        HTTPException: 404 when the assistant does not exist for this org.
    """
    logger.info(
        f"Processing response request for assistant_id={request.assistant_id}, project_id={request.project_id}, organization_id={_current_user.organization_id}"
    )

    # The assistant must belong to the caller's organization.
    assistant = get_assistant_by_id(
        _session, request.assistant_id, _current_user.organization_id
    )
    if not assistant:
        logger.error(
            f"Assistant not found: assistant_id={request.assistant_id}, project_id={request.project_id}, organization_id={_current_user.organization_id}"
        )
        raise HTTPException(
            status_code=404,
            detail="Assistant not found or not active",
        )

    credentials = get_provider_credential(
        session=_session,
        org_id=_current_user.organization_id,
        provider="openai",
        project_id=request.project_id,
    )
    if not credentials or "api_key" not in credentials:
        logger.error(
            f"OpenAI API key not configured for org_id={_current_user.organization_id}, project_id={request.project_id}, organization_id={_current_user.organization_id}"
        )
        return {
            "success": False,
            "error": "OpenAI API key not configured for this organization.",
            "data": None,
            "metadata": None,
        }

    client = OpenAI(api_key=credentials["api_key"])

    # Hand the heavy lifting to a background task; the HTTP response
    # below only acknowledges that processing has started.
    background_tasks.add_task(
        process_response, request, client, assistant, _current_user.organization_id
    )
    logger.info(
        f"Background task scheduled for response processing: assistant_id={request.assistant_id}, project_id={request.project_id}, organization_id={_current_user.organization_id}"
    )

    return {
        "success": True,
        "data": {
            "status": "processing",
            "message": "Response creation started",
            "success": True,
        },
        "error": None,
        "metadata": None,
    }


@router.post("/responses/sync", response_model=ResponsesAPIResponse)
async def responses_sync(
request: ResponsesAPIRequest,
request: ResponsesSyncAPIRequest,
_session: Session = Depends(get_db),
_current_user: UserOrganization = Depends(get_current_user_org),
):
"""
Temp synchronous endpoint for benchmarking OpenAI responses API
Synchronous endpoint for benchmarking OpenAI responses API
"""
credentials = get_provider_credential(
session=_session,
Expand Down
Loading