Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
"""create assistant table

Revision ID: 8757b005d681
Revises: 8e7dc5eab0b0
Create Date: 2025-06-16 13:40:10.447538

"""
from alembic import op
import sqlalchemy as sa
import sqlmodel.sql.sqltypes


# revision identifiers, used by Alembic.
revision = "8757b005d681"
down_revision = "8e7dc5eab0b0"
branch_labels = None
depends_on = None


def upgrade():
    """Create the openai_assistant table.

    Columns mirror an OpenAI assistant's configuration (model, instructions,
    vector store, retrieval limit, temperature) plus ownership and audit
    fields. Rows are removed automatically when the owning organization or
    project is deleted (ON DELETE CASCADE).
    """
    op.create_table(
        "openai_assistant",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("assistant_id", sa.VARCHAR(length=255), nullable=False),
        sa.Column("name", sa.VARCHAR(length=255), nullable=False),
        # Instantiate the type (Integer()) for consistency with the other
        # columns; SQLAlchemy accepts the bare class, but the file's
        # convention is the instantiated form.
        sa.Column("max_num_results", sa.Integer(), nullable=False),
        sa.Column("model", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column("instructions", sa.Text(), nullable=False),
        sa.Column("temperature", sa.Float(), nullable=False),
        sa.Column("vector_store_id", sa.VARCHAR(length=255), nullable=False),
        sa.Column("organization_id", sa.Integer(), nullable=False),
        sa.Column("project_id", sa.Integer(), nullable=False),
        sa.Column("inserted_at", sa.DateTime(), nullable=False),
        sa.Column("updated_at", sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(
            ["organization_id"], ["organization.id"], ondelete="CASCADE"
        ),
        sa.ForeignKeyConstraint(["project_id"], ["project.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )


def downgrade():
    """Revert the migration by dropping the openai_assistant table."""
    op.drop_table("openai_assistant")
182 changes: 173 additions & 9 deletions backend/app/api/routes/responses.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,21 @@
from typing import Optional
from typing import Optional, Dict, Any
import logging

import openai
from pydantic import BaseModel
from fastapi import APIRouter, Depends
from pydantic import BaseModel, Extra
from fastapi import APIRouter, Depends, BackgroundTasks, HTTPException
from openai import OpenAI
from sqlmodel import Session

from app.api.deps import get_current_user_org, get_db
from app.api.routes.threads import send_callback
from app.crud.credentials import get_provider_credential
from app.crud.assistants import get_assistant_by_id
from app.models import UserOrganization
from app.utils import APIResponse

logger = logging.getLogger(__name__)

router = APIRouter(tags=["responses"])


Expand All @@ -23,22 +28,32 @@

class ResponsesAPIRequest(BaseModel):
    """Request body for the asynchronous /responses endpoint.

    Extra, undeclared fields are accepted and kept on the model; they are
    intended to be passed through to the callback payload (see
    get_additional_data / process_response).
    """

    project_id: int
    assistant_id: str
    question: str
    callback_url: Optional[str] = None  # if set, results are POSTed here
    response_id: Optional[str] = None  # previous response id, for threading

    class Config:
        # Plain string instead of the deprecated `pydantic.Extra` enum:
        # the file already uses Pydantic v2 APIs (model_dump), where
        # `Extra` is deprecated; "allow" behaves identically in v1 and v2.
        extra = "allow"


class ResponsesSyncAPIRequest(BaseModel):
    """Request body for the synchronous /responses/sync benchmarking endpoint.

    Unlike ResponsesAPIRequest, the OpenAI configuration (model, instructions,
    vector stores, limits) is supplied inline rather than looked up from a
    stored assistant.
    """

    project_id: int
    model: str
    instructions: str
    vector_store_ids: list[str]
    max_num_results: Optional[int] = 20  # cap on file_search result chunks
    temperature: Optional[float] = 0.1
    response_id: Optional[str] = None  # previous response id, if continuing

    question: str


class Diagnostics(BaseModel):
    """Token-usage and model metadata copied from the OpenAI response."""

    input_tokens: int
    output_tokens: int
    total_tokens: int

    model: str  # model name reported by OpenAI for this response


Expand All @@ -49,11 +64,9 @@

class _APIResponse(BaseModel):
    """Payload describing one completed response (returned or sent to callback).

    NOTE(review): process_response also passes arbitrary extra keyword
    arguments (pass-through request fields) when constructing this model;
    those survive validation only if this model is configured to allow
    extra fields — confirm the intended behavior.
    """

    status: str  # "success" on the happy path

    response_id: str  # OpenAI response id, usable as previous_response_id
    message: str  # the model's output text
    chunks: list[FileResultChunk]  # file_search result chunks

    diagnostics: Optional[Diagnostics] = None  # token usage / model info


Expand All @@ -73,14 +86,165 @@
return results


def get_additional_data(request: dict) -> dict:
    """Return only the caller-supplied extra fields of a request dict.

    Keys consumed by the endpoint itself (routing, identity, callback
    plumbing) are stripped; everything else is passed through unchanged.
    """
    reserved = {"project_id", "assistant_id", "callback_url", "response_id", "question"}
    extras: dict = {}
    for key, value in request.items():
        if key not in reserved:
            extras[key] = value
    return extras


def process_response(
    request: ResponsesAPIRequest, client: OpenAI, assistant, organization_id: int
):
    """Generate an OpenAI response for *request* and deliver it via callback.

    Runs as a background task: creates a response using the stored
    assistant's configuration (file_search over its vector store), then, if
    the request carries a callback_url, POSTs the success or failure payload
    there. OpenAI errors are converted into a failure payload rather than
    raised, so the callback is always attempted.
    """
    logger.info(
        f"[responses.process_response] Starting generating response for assistant_id={request.assistant_id}, project_id={request.project_id}, organization_id={organization_id}"
    )
    try:
        response = client.responses.create(
            model=assistant.model,
            previous_response_id=request.response_id,
            instructions=assistant.instructions,
            tools=[
                {
                    "type": "file_search",
                    "vector_store_ids": [assistant.vector_store_id],
                    "max_num_results": assistant.max_num_results,
                }
            ],
            temperature=assistant.temperature,
            input=[{"role": "user", "content": request.question}],
            include=["file_search_call.results"],
        )
        response_chunks = get_file_search_results(response)
        logger.info(
            f"[responses.process_response] Successfully generated response: response_id={response.id}, assistant={request.assistant_id}, project_id={request.project_id}, organization_id={organization_id}"
        )

        # Forward any extra client-supplied fields alongside the result.
        # Reuse get_additional_data so the reserved-key set is maintained in
        # exactly one place instead of being duplicated inline here.
        # NOTE(review): these extras survive only if _APIResponse allows
        # extra fields — confirm its model config.
        request_dict = request.model_dump()
        callback_response = ResponsesAPIResponse.success_response(
            data=_APIResponse(
                status="success",
                response_id=response.id,
                message=response.output_text,
                chunks=response_chunks,
                diagnostics=Diagnostics(
                    input_tokens=response.usage.input_tokens,
                    output_tokens=response.usage.output_tokens,
                    total_tokens=response.usage.total_tokens,
                    model=response.model,
                ),
                **get_additional_data(request_dict),
            ),
        )
    except openai.OpenAIError as e:
        error_message = handle_openai_error(e)
        logger.error(
            f"[responses.process_response] OpenAI API error during response processing: {error_message}, project_id={request.project_id}, organization_id={organization_id}"
        )
        callback_response = ResponsesAPIResponse.failure_response(error=error_message)

    if request.callback_url:
        logger.info(
            f"[responses.process_response] Sending callback to URL: {request.callback_url}, assistant={request.assistant_id}, project_id={request.project_id}, organization_id={organization_id}"
        )

        send_callback(request.callback_url, callback_response.model_dump())
        logger.info(
            f"[responses.process_response] Callback sent successfully, assistant={request.assistant_id}, project_id={request.project_id}, organization_id={organization_id}"
        )


@router.post("/responses", response_model=dict)
async def responses(
    request: ResponsesAPIRequest,
    background_tasks: BackgroundTasks,
    _session: Session = Depends(get_db),
    _current_user: UserOrganization = Depends(get_current_user_org),
):
    """Kick off response generation in the background and return immediately.

    Validates the assistant and the organization's OpenAI credentials, then
    schedules process_response as a background task. The actual result is
    delivered via the request's callback_url, not in this HTTP response.
    """
    logger.info(
        f"[responses.responses] Processing response request for assistant_id={request.assistant_id}, project_id={request.project_id}, organization_id={_current_user.organization_id}"
    )

    # Guard: the assistant must exist within the caller's organization.
    assistant = get_assistant_by_id(
        _session, request.assistant_id, _current_user.organization_id
    )
    if not assistant:
        logger.error(
            f"[responses.responses] Assistant not found: assistant_id={request.assistant_id}, project_id={request.project_id}, organization_id={_current_user.organization_id}"
        )
        raise HTTPException(
            status_code=404,
            detail="Assistant not found or not active",
        )

    # Guard: the organization/project must have OpenAI credentials on file.
    credentials = get_provider_credential(
        session=_session,
        org_id=_current_user.organization_id,
        provider="openai",
        project_id=request.project_id,
    )
    if not (credentials and "api_key" in credentials):
        logger.error(
            f"[responses.responses] OpenAI API key not configured for org_id={_current_user.organization_id}, project_id={request.project_id}, organization_id={_current_user.organization_id}"
        )
        return {
            "success": False,
            "error": "OpenAI API key not configured for this organization.",
            "data": None,
            "metadata": None,
        }

    openai_client = OpenAI(api_key=credentials["api_key"])

    # Hand the heavy lifting to a background task; the caller gets an
    # immediate acknowledgement and results arrive via the callback.
    background_tasks.add_task(
        process_response,
        request,
        openai_client,
        assistant,
        _current_user.organization_id,
    )
    logger.info(
        f"[responses.responses] Background task scheduled for response processing: assistant_id={request.assistant_id}, project_id={request.project_id}, organization_id={_current_user.organization_id}"
    )

    return {
        "success": True,
        "data": {
            "status": "processing",
            "message": "Response creation started",
            "success": True,
        },
        "error": None,
        "metadata": None,
    }


@router.post("/responses/sync", response_model=ResponsesAPIResponse)
async def responses_sync(
request: ResponsesAPIRequest,
request: ResponsesSyncAPIRequest,
_session: Session = Depends(get_db),
_current_user: UserOrganization = Depends(get_current_user_org),
):
"""
Temp synchronous endpoint for benchmarking OpenAI responses API
Synchronous endpoint for benchmarking OpenAI responses API
"""
credentials = get_provider_credential(
session=_session,
Expand Down
Loading