diff --git a/app/backend/app.py b/app/backend/app.py index 07d4d64027..cb9edd378b 100644 --- a/app/backend/app.py +++ b/app/backend/app.py @@ -54,6 +54,7 @@ from approaches.promptmanager import PromptyManager from approaches.retrievethenread import RetrieveThenReadApproach from approaches.retrievethenreadvision import RetrieveThenReadVisionApproach +from search_client import AsyncGroundingSearchClient from chat_history.cosmosdb import chat_history_cosmosdb_bp from config import ( CONFIG_ASK_APPROACH, @@ -66,6 +67,8 @@ CONFIG_CHAT_VISION_APPROACH, CONFIG_CREDENTIAL, CONFIG_GPT4V_DEPLOYED, + CONFIG_GROUNDING_SEARCH_CLIENT, + CONFIG_GROUNDING_SEARCH_ENABLED, CONFIG_INGESTER, CONFIG_LANGUAGE_PICKER_ENABLED, CONFIG_OPENAI_CLIENT, @@ -299,6 +302,7 @@ def config(): "showSpeechOutputAzure": current_app.config[CONFIG_SPEECH_OUTPUT_AZURE_ENABLED], "showChatHistoryBrowser": current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], "showChatHistoryCosmos": current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED], + "showGroundingSearchOption": current_app.config[CONFIG_GROUNDING_SEARCH_ENABLED], } ) @@ -466,6 +470,9 @@ async def setup_clients(): USE_SPEECH_OUTPUT_AZURE = os.getenv("USE_SPEECH_OUTPUT_AZURE", "").lower() == "true" USE_CHAT_HISTORY_BROWSER = os.getenv("USE_CHAT_HISTORY_BROWSER", "").lower() == "true" USE_CHAT_HISTORY_COSMOS = os.getenv("USE_CHAT_HISTORY_COSMOS", "").lower() == "true" + USE_GROUNDING_SEARCH = os.getenv("USE_GROUNDING_SEARCH", "").lower() == "true" + GROUNDING_SEARCH_API_KEY = os.getenv("GROUNDING_SEARCH_API_KEY") + GROUNDING_SEARCH_ENDPOINT = os.getenv("GROUNDING_SEARCH_ENDPOINT") # WEBSITE_HOSTNAME is always set by App Service, RUNNING_IN_PRODUCTION is set in main.bicep RUNNING_ON_AZURE = os.getenv("WEBSITE_HOSTNAME") is not None or os.getenv("RUNNING_IN_PRODUCTION") is not None @@ -588,6 +595,19 @@ async def setup_clients(): # Wait until token is needed to fetch for the first time current_app.config[CONFIG_SPEECH_SERVICE_TOKEN] = None + if USE_GROUNDING_SEARCH: + current_app.logger.info("USE_GROUNDING_SEARCH is true, setting up search client") + if not GROUNDING_SEARCH_API_KEY: + raise ValueError("GROUNDING_SEARCH_API_KEY must be set when USE_GROUNDING_SEARCH is true") + if GROUNDING_SEARCH_ENDPOINT: + grounding_search_client = AsyncGroundingSearchClient(GROUNDING_SEARCH_API_KEY, GROUNDING_SEARCH_ENDPOINT) + else: + grounding_search_client = AsyncGroundingSearchClient(GROUNDING_SEARCH_API_KEY) + current_app.config[CONFIG_GROUNDING_SEARCH_CLIENT] = grounding_search_client + else: + current_app.logger.info("USE_GROUNDING_SEARCH is false, search client not set up") + grounding_search_client = None + if OPENAI_HOST.startswith("azure"): if OPENAI_HOST == "azure_custom": current_app.logger.info("OPENAI_HOST is azure_custom, setting up Azure OpenAI custom client") @@ -642,6 +662,7 @@ async def setup_clients(): current_app.config[CONFIG_SPEECH_OUTPUT_AZURE_ENABLED] = USE_SPEECH_OUTPUT_AZURE current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED] = USE_CHAT_HISTORY_BROWSER current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED] = USE_CHAT_HISTORY_COSMOS + current_app.config[CONFIG_GROUNDING_SEARCH_ENABLED] = USE_GROUNDING_SEARCH prompt_manager = PromptyManager() @@ -678,6 +699,7 @@ async def setup_clients(): query_language=AZURE_SEARCH_QUERY_LANGUAGE, query_speller=AZURE_SEARCH_QUERY_SPELLER, prompt_manager=prompt_manager, + grounding_search_client=grounding_search_client, ) if USE_GPT4V: @@ -724,6 +746,7 @@ async def setup_clients(): query_language=AZURE_SEARCH_QUERY_LANGUAGE, 
query_speller=AZURE_SEARCH_QUERY_SPELLER, prompt_manager=prompt_manager, + grounding_search_client=grounding_search_client, ) diff --git a/app/backend/approaches/approach.py b/app/backend/approaches/approach.py index 44a1d6380a..3eee3b3c1f 100644 --- a/app/backend/approaches/approach.py +++ b/app/backend/approaches/approach.py @@ -25,6 +25,7 @@ from openai.types.chat import ChatCompletionMessageParam from approaches.promptmanager import PromptManager +from search_client import WebPage from core.authentication import AuthenticationHelper @@ -236,6 +237,9 @@ def get_citation(self, sourcepage: str, use_image_citation: bool) -> str: return sourcepage + def get_links(self, webpages: list[WebPage]) -> list[str]: + return [f"{page.id}: {page.snippet}" for page in webpages] + async def compute_text_embedding(self, q: str): SUPPORTED_DIMENSIONS_MODEL = { "text-embedding-ada-002": False, diff --git a/app/backend/approaches/chatreadretrieveread.py b/app/backend/approaches/chatreadretrieveread.py index 7777b9a741..e8a481fbb1 100644 --- a/app/backend/approaches/chatreadretrieveread.py +++ b/app/backend/approaches/chatreadretrieveread.py @@ -11,9 +11,10 @@ ) from openai_messages_token_helper import build_messages, get_token_limit -from approaches.approach import ThoughtStep +from approaches.approach import Document, ThoughtStep from approaches.chatapproach import ChatApproach from approaches.promptmanager import PromptManager +from search_client import AsyncGroundingSearchClient, WebPage from core.authentication import AuthenticationHelper @@ -39,10 +40,12 @@ def __init__( content_field: str, query_language: str, query_speller: str, - prompt_manager: PromptManager + prompt_manager: PromptManager, + grounding_search_client: Optional[AsyncGroundingSearchClient] = None, ): self.search_client = search_client self.openai_client = openai_client + self.grounding_search_client = grounding_search_client self.auth_helper = auth_helper self.chatgpt_model = chatgpt_model self.chatgpt_deployment = chatgpt_deployment @@ -58,6 +61,9 @@ def __init__( self.query_rewrite_prompt = self.prompt_manager.load_prompt("chat_query_rewrite.prompty") self.query_rewrite_tools = self.prompt_manager.load_tools("chat_query_rewrite_tools.json") self.answer_prompt = self.prompt_manager.load_prompt("chat_answer_question.prompty") + self.ground_answer_prompt = self.prompt_manager.load_prompt("chat_ground_answer_question.prompty") + self.ground_rewrite_prompt = self.prompt_manager.load_prompt("chat_ground_rewrite.prompty") + self.ground_rewrite_tools = self.prompt_manager.load_tools("chat_ground_rewrite_tools.json") @overload async def run_until_final_call( @@ -89,6 +95,7 @@ async def run_until_final_call( use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_ranker = True if overrides.get("semantic_ranker") else False use_semantic_captions = True if overrides.get("semantic_captions") else False + use_grounding_search = True if overrides.get("use_grounding_search") else False top = overrides.get("top", 3) minimum_search_score = overrides.get("minimum_search_score", 0.0) minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) @@ -98,36 +105,45 @@ async def run_until_final_call( if not isinstance(original_user_query, str): raise ValueError("The most recent message content must be a string.") + # STEP 1: Generate an optimized keyword search query based on the chat history and the last question + async def keyword_rewrite(rendered_prompt, tools): + query_response_token_limit = 100 + 
query_messages = build_messages( + model=self.chatgpt_model, + system_prompt=rendered_prompt.system_content, + few_shots=rendered_prompt.few_shot_messages, + past_messages=rendered_prompt.past_messages, + new_user_content=rendered_prompt.new_user_content, + tools=tools, + max_tokens=self.chatgpt_token_limit - query_response_token_limit, + fallback_to_default=self.ALLOW_NON_GPT_MODELS, + ) + + chat_completion: ChatCompletion = await self.openai_client.chat.completions.create( + messages=query_messages, # type: ignore + # Azure OpenAI takes the deployment name as the model name + model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model, + temperature=0.0, # Minimize creativity for search query generation + max_tokens=query_response_token_limit, # Setting too low risks malformed JSON, setting too high may affect performance + n=1, + tools=tools, + seed=seed, + ) + + return query_messages, self.get_search_query(chat_completion, original_user_query) + rendered_query_prompt = self.prompt_manager.render_prompt( self.query_rewrite_prompt, {"user_query": original_user_query, "past_messages": messages[:-1]} ) tools: List[ChatCompletionToolParam] = self.query_rewrite_tools - - # STEP 1: Generate an optimized keyword search query based on the chat history and the last question - query_response_token_limit = 100 - query_messages = build_messages( - model=self.chatgpt_model, - system_prompt=rendered_query_prompt.system_content, - few_shots=rendered_query_prompt.few_shot_messages, - past_messages=rendered_query_prompt.past_messages, - new_user_content=rendered_query_prompt.new_user_content, - tools=tools, - max_tokens=self.chatgpt_token_limit - query_response_token_limit, - fallback_to_default=self.ALLOW_NON_GPT_MODELS, - ) - - chat_completion: ChatCompletion = await self.openai_client.chat.completions.create( - messages=query_messages, # type: ignore - # Azure OpenAI takes the deployment name as the model name - model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model, - temperature=0.0, # Minimize creativity for search query generation - max_tokens=query_response_token_limit, # Setting too low risks malformed JSON, setting too high may affect performance - n=1, - tools=tools, - seed=seed, - ) - - query_text = self.get_search_query(chat_completion, original_user_query) + query_messages, query_text = await keyword_rewrite(rendered_query_prompt, tools) + if use_grounding_search and self.grounding_search_client: + ground_search_prompt = self.prompt_manager.render_prompt( + self.ground_rewrite_prompt, + {"user_query": original_user_query, "past_messages": messages[:-1]}, + ) + _, ground_query_text = await keyword_rewrite(ground_search_prompt, self.ground_rewrite_tools) + ground_results = await self.grounding_search_client.search(ground_query_text, lang=self.query_language) # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query @@ -136,7 +152,7 @@ async def run_until_final_call( if use_vector_search: vectors.append(await self.compute_text_embedding(query_text)) - results = await self.search( + results: list[Document] = await self.search( top, query_text, filter, @@ -151,16 +167,33 @@ async def run_until_final_call( # STEP 3: Generate a contextual and content specific answer using the search results and chat history text_sources = self.get_sources_content(results, use_semantic_captions, use_image_citation=False) - rendered_answer_prompt = self.prompt_manager.render_prompt( - self.answer_prompt, - 
self.get_system_prompt_variables(overrides.get("prompt_template")) - | { - "include_follow_up_questions": bool(overrides.get("suggest_followup_questions")), - "past_messages": messages[:-1], - "user_query": original_user_query, - "text_sources": text_sources, - }, - ) + web_sources: list[WebPage] = [] + if use_grounding_search and ground_results.totalEstimatedMatches > 0: + web_sources = ground_results.value[:2] + web_sources_text = self.get_links(web_sources) + + rendered_answer_prompt = self.prompt_manager.render_prompt( + self.ground_answer_prompt, + self.get_system_prompt_variables(overrides.get("prompt_template")) + | { + "include_follow_up_questions": bool(overrides.get("suggest_followup_questions")), + "past_messages": messages[:-1], + "user_query": original_user_query, + "text_sources": text_sources, + "web_search_snippets": web_sources_text, + }, + ) + else: + rendered_answer_prompt = self.prompt_manager.render_prompt( + self.answer_prompt, + self.get_system_prompt_variables(overrides.get("prompt_template")) + | { + "include_follow_up_questions": bool(overrides.get("suggest_followup_questions")), + "past_messages": messages[:-1], + "user_query": original_user_query, + "text_sources": text_sources, + }, + ) response_token_limit = 1024 messages = build_messages( @@ -173,7 +206,7 @@ async def run_until_final_call( ) extra_info = { - "data_points": {"text": text_sources}, + "data_points": {"text": text_sources, "web_search": [hit.model_dump() for hit in web_sources]}, "thoughts": [ ThoughtStep( "Prompt to generate search query", @@ -184,6 +217,11 @@ async def run_until_final_call( else {"model": self.chatgpt_model} ), ), + ThoughtStep("Grounding search query", ground_query_text if use_grounding_search else None, {}), + ThoughtStep( + "Grounding search results", + [result.snippet for result in ground_results.value[:2]] if use_grounding_search else None, + ), ThoughtStep( "Search using generated search query", query_text, diff --git a/app/backend/approaches/chatreadretrievereadvision.py b/app/backend/approaches/chatreadretrievereadvision.py index 3c05d22180..dd02198428 100644 --- a/app/backend/approaches/chatreadretrievereadvision.py +++ b/app/backend/approaches/chatreadretrievereadvision.py @@ -14,6 +14,7 @@ from approaches.approach import ThoughtStep from approaches.chatapproach import ChatApproach from approaches.promptmanager import PromptManager +from search_client import AsyncGroundingSearchClient from core.authentication import AuthenticationHelper from core.imageshelper import fetch_image @@ -46,10 +47,12 @@ def __init__( vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]], prompt_manager: PromptManager, + grounding_search_client: Optional[AsyncGroundingSearchClient] = None, ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client + self.grounding_search_client = grounding_search_client self.auth_helper = auth_helper self.chatgpt_model = chatgpt_model self.chatgpt_deployment = chatgpt_deployment diff --git a/app/backend/approaches/prompts/chat_ground_answer_question.prompty b/app/backend/approaches/prompts/chat_ground_answer_question.prompty new file mode 100644 index 0000000000..309c0313cf --- /dev/null +++ b/app/backend/approaches/prompts/chat_ground_answer_question.prompty @@ -0,0 +1,58 @@ +--- +name: Chat +description: Answer a question (with chat history) using text sources and web searches. +model: + api: chat +sample: + user_query: What does a product manager do that a CEO doesn't? 
+ include_follow_up_questions: true + past_messages: + - role: user + content: "What does a CEO do?" + - role: assistant + content: "A CEO, or Chief Executive Officer, is responsible for providing strategic direction and oversight to a company to ensure its long-term success and profitability. They develop and implement strategies and objectives for financial success and growth, provide guidance to the executive team, manage day-to-day operations, ensure compliance with laws and regulations, develop and maintain relationships with stakeholders, monitor industry trends, and represent the company in public events 12. [role_library.pdf#page=1][role_library.pdf#page=3]" + text_sources: + - "role_library.pdf#page=29: The Manager of Product Management will collaborate with internal teams, such as engineering, sales, marketing, and finance, as well as external partners, suppliers, and customers to ensure successful product execution. Responsibilities: · Lead the product management team and provide guidance on product strategy, design, development, and launch. · Develop and implement product life-cycle management processes. · Monitor and analyze industry trends to identify opportunities for new products. · Develop product marketing plans and go-to-market strategies. · Research customer needs and develop customer-centric product roadmaps. · Collaborate with internal teams to ensure product execution and successful launch. · Develop pricing strategies and cost models. · Oversee product portfolio and performance metrics. · Manage product development budget. · Analyze product performance and customer feedback to identify areas for improvement. Qualifications: · Bachelor's degree in business, engineering, or a related field. · At least 5 years of experience in product management. · Proven track record of successful product launches." + - "role_library.pdf#page=23: Company: Contoso Electronics Location: Anywhere Job Type: Full-Time Salary: Competitive, commensurate with experience Job Summary: The Senior Manager of Product Management will be responsible for leading the product management team at Contoso Electronics. This role includes developing strategies, plans and objectives for the product management team and managing the day-to-day operations. The Senior Manager of Product Management will be responsible for the successful launch of new products and the optimization of existing products. Responsibilities: · Develop and implement product management strategies, plans and objectives to maximize team performance. · Analyze competitive landscape and market trends to develop product strategies. · Lead the product management team in the development of product plans, roadmaps and launch plans. · Monitor the performance of product management team, analyze results and implement corrective action as needed. · Manage the product lifecycle, including product development, launch, and end of life. · Ensure product features and benefits meet customer requirements. · Establish and maintain relationships with key customers, partners, and vendors." + - "role_library.pdf#page=28: · 7+ years of experience in research and development in the electronics sector. · Proven track record of successfully designing, testing, and optimizing products. · Experience leading a team of researchers and engineers. · Excellent problem-solving and analytical skills. · Ability to work in a fast-paced environment and meet tight deadlines.· Knowledge of industry trends, technologies, and regulations. 
· Excellent communication and presentation skills. Manager of Product Management Job Title: Manager of Product Management, Contoso Electronics Job Summary: The Manager of Product Management is responsible for overseeing the product management team, driving product development and marketing strategy for Contoso Electronics. This individual will be accountable for the successful launch of new products and the implementation of product life-cycle management processes. The Manager of Product Management will collaborate with internal teams, such as engineering, sales, marketing, and finance, as well as external partners, suppliers, and customers to ensure successful product execution." +--- +system: +{% if override_prompt %} +{{ override_prompt }} +{% else %} +Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. +Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. +If the question is not in English, answer in the language used in the question. +Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf]. +Additional "web search snippets" are included, each with a unique URI then a colon and the snippet text in HTML. If the sources do not answer the question, you can use the web search snippets to find the answer. The original sources take precedence over the web search snippets. +If you use a web search snippet in your response, always include the URI of the snippet in square brackets, for example [https://api.bing.microsoft.com/api/v7/#WebPages.0]. +{{ injected_prompt }} +{% endif %} + +{% if include_follow_up_questions %} +Generate 3 very brief follow-up questions that the user would likely ask next. +Enclose the follow-up questions in double angle brackets. Example: +<> +<> +<> +Do not repeat questions that have already been asked. +Make sure the last question ends with ">>". +{% endif %} + +{% for message in past_messages %} +{{ message["role"] }}: +{{ message["content"] }} +{% endfor %} + +user: +{{ user_query }} + +Sources: +{% for text_source in text_sources %} +{{ text_source }} +{% endfor %} + +Web Search Snippets: +{% for snippet in web_search_snippets %} +{{ snippet }} +{% endfor %} \ No newline at end of file diff --git a/app/backend/approaches/prompts/chat_ground_rewrite.prompty b/app/backend/approaches/prompts/chat_ground_rewrite.prompty new file mode 100644 index 0000000000..ce3ebd7089 --- /dev/null +++ b/app/backend/approaches/prompts/chat_ground_rewrite.prompty @@ -0,0 +1,51 @@ +--- +name: Rewrite RAG query for web search +description: Suggest the optimal search query based on the user's query, examples, and chat history. +model: + api: chat + parameters: + tools: ${file:chat_ground_rewrite_tools.json} +sample: + user_query: Does it include hearing? + past_messages: + - role: user + content: "What is included in my Northwind Health Plus plan that is not in standard?" 
+ - role: assistant + content: "The Northwind Health Plus plan includes coverage for emergency services, mental health and substance abuse coverage, and out-of-network services, which are not included in the Northwind Standard plan. [Benefit_Options.pdf#page=3]" +--- +system: +Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base and/or a web search. +If the user question requires grounding from an Internet search, generate an Internet search query based on the conversation and the new question. +If you cannot generate a search query, return just the number 0. + +user: +(EXAMPLE) Who won the 2020 NBA championship? + +assistant: +2020 NBA championship winner + +user: +(EXAMPLE) What are my health plans? + +assistant: +0 + +user: +(EXAMPLE) What is the corporate pay scheme? + +assistant: +0 + +user: +(EXAMPLE) What is the address of Microsoft headquarters? + +assistant: +Microsoft headquarters address + +{% for message in past_messages %} +{{ message["role"] }}: +{{ message["content"] }} +{% endfor %} + +user: +Generate Grounding search query for: {{ user_query }} diff --git a/app/backend/approaches/prompts/chat_ground_rewrite_tools.json b/app/backend/approaches/prompts/chat_ground_rewrite_tools.json new file mode 100644 index 0000000000..eef5214962 --- /dev/null +++ b/app/backend/approaches/prompts/chat_ground_rewrite_tools.json @@ -0,0 +1,17 @@ +[{ + "type": "function", + "function": { + "name": "search_internet", + "description": "Retrieve sources from the Internet", + "parameters": { + "type": "object", + "properties": { + "internet_search_query": { + "type": "string", + "description": "Query string to retrieve documents from an Internet search e.g.: 'Microsoft headquarters address'" + } + }, + "required": ["internet_search_query"] + } + } +}] diff --git a/app/backend/config.py b/app/backend/config.py index a9315df6c0..24a9deafa6 100644 --- a/app/backend/config.py +++ b/app/backend/config.py @@ -26,4 +26,6 @@ CONFIG_CHAT_HISTORY_COSMOS_ENABLED = "chat_history_cosmos_enabled" CONFIG_COSMOS_HISTORY_CLIENT = "cosmos_history_client" CONFIG_COSMOS_HISTORY_CONTAINER = "cosmos_history_container" +CONFIG_GROUNDING_SEARCH_ENABLED = "grounding_search_enabled" +CONFIG_GROUNDING_SEARCH_CLIENT = "grounding_search_client" CONFIG_COSMOS_HISTORY_VERSION = "cosmos_history_version" diff --git a/app/backend/requirements.in b/app/backend/requirements.in index 66aa504dd3..97cb5a53f9 100644 --- a/app/backend/requirements.in +++ b/app/backend/requirements.in @@ -32,3 +32,4 @@ python-dotenv prompty rich typing-extensions +azure-ai-projects \ No newline at end of file diff --git a/app/backend/search_client.py b/app/backend/search_client.py new file mode 100644 index 0000000000..c1fc8b2d33 --- /dev/null +++ b/app/backend/search_client.py @@ -0,0 +1,130 @@ +""" +An async client for Bing Web Search API. +""" + +from typing import Optional, Union + +import httpx +from pydantic import BaseModel, ConfigDict +from azure.ai.projects.aio import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.projects.models import BingGroundingTool + + +BING_CONNECTION_NAME = 'antbingtesting' + +class WebPage(BaseModel): + id: str + name: str + url: str + displayUrl: str + dateLastCrawled: str + language: str + snippet: Optional[str] = None + isFamilyFriendly: Optional[bool] = True + siteName: Optional[str] = None + + # There are more fields in the response, but we only care about these for now. 
+ model_config = ConfigDict( + extra="allow", + ) + + +class WebAnswer(BaseModel): + totalEstimatedMatches: int + value: list[WebPage] + webSearchUrl: str + + # There are more fields in the response, but we only care about these for now. + model_config = ConfigDict( + extra="allow", + ) + + +class AsyncGroundingSearchClient: + project_client: AIProjectClient + bing_tool: BingGroundingTool + agent_id: str = "asst_u8x2Hb9c9stVQwQMbostJ8JK" + + def __init__(self, api_key: str, endpoint: Optional[str] = None): + self.connection_string = endpoint + + + async def search(self, query: str, lang="en-US") -> WebAnswer: + cred = DefaultAzureCredential() + # endpoint is the connection string + self.project_client = AIProjectClient.from_connection_string(self.connection_string, cred) + + async with self.project_client: + # Create thread for communication + thread = await self.project_client.agents.create_thread() + + # Create message to thread + message = await self.project_client.agents.create_message( + thread_id=thread.id, + role="user", + content=query, + ) + + # Create and process agent run in thread with tools + run = await self.project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=self.agent_id) + + if run.status == "failed": + raise Exception(run.last_error) + + # Fetch and log all messages + messages = await self.project_client.agents.list_messages(thread_id=thread.id) + + print(f"Messages: {messages}") + + run_steps = await self.project_client.agents.list_run_steps(run_id=run.id, thread_id=thread.id) + run_steps_data = run_steps['data'] + print(run_steps_data) + + url = messages.data[0].content[0].text.annotations[0].as_dict()['url_citation']['url'] + title = messages.data[0].content[0].text.annotations[0].as_dict()['url_citation']['title'] + snippet = messages.data[0].content[0].text.value + + + return WebAnswer( + totalEstimatedMatches=1, + webSearchUrl="https://www.bing.com", + value=[ + WebPage( + id="1", + name=title, + url=url, + displayUrl=url, + dateLastCrawled="2021-10-01", + language="en", + snippet=snippet, + isFamilyFriendly=True, + siteName="Bing" + ) + ], + ) + +class AsyncBingSearchClient(AsyncGroundingSearchClient): + def __init__(self, api_key: str, bing_endpoint: Optional[str] = "api.bing.microsoft.com"): + self.api_key = api_key + self.base_url = f"https://{bing_endpoint}/v7.0/search" + self.headers = { + "Ocp-Apim-Subscription-Key": self.api_key, + "User-Agent": "azure-search-openai-demo", + # "X-Search-Location": "" # this would be useful in future + } + + async def search(self, query: str, lang="en-US") -> WebAnswer: + params: dict[str, Union[str, bool, int]] = { + "q": query, + "mkt": lang, + "textDecorations": True, + "textFormat": "HTML", + "responseFilter": "Webpages", + "safeSearch": "Strict", + "setLang": lang, + } + async with httpx.AsyncClient() as client: + response = await client.get(self.base_url, headers=self.headers, params=params) + response.raise_for_status() + return WebAnswer.model_validate(response.json()["webPages"]) diff --git a/app/frontend/src/api/models.ts b/app/frontend/src/api/models.ts index f560271325..b0ae97bd93 100644 --- a/app/frontend/src/api/models.ts +++ b/app/frontend/src/api/models.ts @@ -37,6 +37,7 @@ export type ChatAppRequestOverrides = { gpt4v_input?: GPT4VInput; vector_fields: VectorFieldOptions[]; language: string; + use_grounding_search?: boolean; }; export type ResponseMessage = { @@ -85,6 +86,7 @@ export type Config = { showGPT4VOptions: boolean; showSemanticRankerOption: boolean; showVectorOption: 
boolean; + showGroundingSearchOption: boolean; showUserUpload: boolean; showLanguagePicker: boolean; showSpeechInput: boolean; diff --git a/app/frontend/src/components/Answer/Answer.tsx b/app/frontend/src/components/Answer/Answer.tsx index 75b0a03504..7c2a652409 100644 --- a/app/frontend/src/components/Answer/Answer.tsx +++ b/app/frontend/src/components/Answer/Answer.tsx @@ -8,10 +8,11 @@ import rehypeRaw from "rehype-raw"; import styles from "./Answer.module.css"; import { ChatAppResponse, getCitationFilePath, SpeechConfig } from "../../api"; -import { parseAnswerToHtml } from "./AnswerParser"; +import { citationIdToCitation, parseAnswerToHtml } from "./AnswerParser"; import { AnswerIcon } from "./AnswerIcon"; import { SpeechOutputBrowser } from "./SpeechOutputBrowser"; import { SpeechOutputAzure } from "./SpeechOutputAzure"; +import { WebPage } from "../SupportingContent"; interface Props { answer: ChatAppResponse; @@ -110,11 +111,23 @@ export const Answer = ({ {t("citationWithColon")} {parsedAnswer.citations.map((x, i) => { const path = getCitationFilePath(x); - return ( - onCitationClicked(path)}> - {`${++i}. ${x}`} - - ); + const citation = citationIdToCitation(x, answer.context.data_points); + + if (citation.type === "document") + return ( + onCitationClicked(path)}> + {`${++i}. ${x}`} + + ); + else if (citation.type === "web") { + const webPage = citation.citation as WebPage; + const label = webPage.siteName ? webPage.siteName : webPage.url; + return ( + + {`${++i}. ${label}`} + + ); + } })} diff --git a/app/frontend/src/components/Answer/AnswerParser.tsx b/app/frontend/src/components/Answer/AnswerParser.tsx index 3807592f6d..108d011189 100644 --- a/app/frontend/src/components/Answer/AnswerParser.tsx +++ b/app/frontend/src/components/Answer/AnswerParser.tsx @@ -1,11 +1,40 @@ import { renderToStaticMarkup } from "react-dom/server"; import { ChatAppResponse, getCitationFilePath } from "../../api"; +import { WebPage } from "../SupportingContent"; type HtmlParsedAnswer = { answerHtml: string; citations: string[]; }; +type Citation = { + id: string; + type: "document" | "web"; + citation: string | WebPage; +}; + +export function citationIdToCitation(citationId: string, contextDataPoints: any): Citation { + // See if this is a web page citation + const webSearch = contextDataPoints.web_search; + if (Array.isArray(webSearch)) { + const webPage = webSearch.find((page: WebPage) => page.id === citationId); + if (webPage) { + return { + id: citationId, + type: "web", + citation: webPage + }; + } + } + + // Otherwise, assume it's a document citation + return { + id: citationId, + type: "document", + citation: citationId + }; +} + // Function to validate citation format and check if dataPoint starts with possible citation function isCitationValid(contextDataPoints: any, citationCandidate: string): boolean { const regex = /.+\.\w{1,}(?:#\S*)?$/; @@ -22,6 +51,14 @@ function isCitationValid(contextDataPoints: any, citationCandidate: string): boo } else { return false; } + // If there are web_sources, add those to the list of identifiers + if (Array.isArray(contextDataPoints.web_search)) { + contextDataPoints.web_search.forEach((source: any) => { + if (source.id) { + dataPointsArray.push(source.id); + } + }); + } const isValidCitation = dataPointsArray.some(dataPoint => { return dataPoint.startsWith(citationCandidate); diff --git a/app/frontend/src/components/Settings/Settings.tsx b/app/frontend/src/components/Settings/Settings.tsx index de404297ab..571930a97c 100644 --- 
a/app/frontend/src/components/Settings/Settings.tsx +++ b/app/frontend/src/components/Settings/Settings.tsx @@ -24,6 +24,7 @@ export interface SettingsProps { retrievalMode: RetrievalMode; useGPT4V: boolean; gpt4vInput: GPT4VInput; + useGroundingSearch: boolean; vectorFieldList: VectorFieldOptions[]; showSemanticRankerOption: boolean; showGPT4VOptions: boolean; @@ -40,6 +41,7 @@ export interface SettingsProps { promptTemplatePrefix?: string; promptTemplateSuffix?: string; showSuggestFollowupQuestions?: boolean; + showGroundingSearchOption: boolean; } export const Settings = ({ @@ -56,10 +58,12 @@ export const Settings = ({ retrievalMode, useGPT4V, gpt4vInput, + useGroundingSearch, vectorFieldList, showSemanticRankerOption, showGPT4VOptions, showVectorOption, + showGroundingSearchOption, useOidSecurityFilter, useGroupsSecurityFilter, useLogin, @@ -104,6 +108,8 @@ export const Settings = ({ const shouldStreamFieldId = useId("shouldStreamField"); const suggestFollowupQuestionsId = useId("suggestFollowupQuestions"); const suggestFollowupQuestionsFieldId = useId("suggestFollowupQuestionsField"); + const useGroundingSearchId = useId("useGroundingSearch"); + const useGroundingSearchFieldId = useId("useGroundingSearchField"); const renderLabel = (props: RenderLabelType | undefined, labelId: string, fieldId: string, helpText: string) => ( @@ -311,6 +317,18 @@ export const Settings = ({ } /> )} + + {showGroundingSearchOption && ( + onChange("useGroundingSearch", !!checked)} + aria-labelledby={useGroundingSearchId} + onRenderLabel={props => renderLabel(props, useGroundingSearchId, useGroundingSearchFieldId, t("helpTexts.useGroundingSearch"))} + /> + )} ); }; diff --git a/app/frontend/src/components/SupportingContent/SupportingContent.tsx b/app/frontend/src/components/SupportingContent/SupportingContent.tsx index 94df0ecca2..aadb32284c 100644 --- a/app/frontend/src/components/SupportingContent/SupportingContent.tsx +++ b/app/frontend/src/components/SupportingContent/SupportingContent.tsx @@ -2,13 +2,26 @@ import { parseSupportingContentItem } from "./SupportingContentParser"; import styles from "./SupportingContent.module.css"; +export interface WebPage { + id: string; + name: string; + url: string; + displayUrl: string; + dateLastCrawled: string; + language: string; + snippet?: string; + isFamilyFriendly?: boolean; + siteName?: string; +} + interface Props { - supportingContent: string[] | { text: string[]; images?: string[] }; + supportingContent: string[] | { text: string[]; images?: string[]; web_search?: WebPage[] }; } export const SupportingContent = ({ supportingContent }: Props) => { const textItems = Array.isArray(supportingContent) ? supportingContent : supportingContent.text; const imageItems = !Array.isArray(supportingContent) ? supportingContent?.images : []; + const webSearchItems = !Array.isArray(supportingContent) ? supportingContent?.web_search : []; return (
        {textItems.map((c, ind) => {
@@ -27,6 +40,17 @@ export const SupportingContent = ({ supportingContent }: Props) => {
                );
            })}
+            {webSearchItems?.map((webPage, ind) => {
+                return (
+                    <li className={styles.supportingContentItem} key={ind}>
+                        <h4 className={styles.supportingContentItemHeader}>{webPage.name}</h4>
+                        <a href={webPage.url} target="_blank" rel="noreferrer">
+                            {webPage.url}
+                        </a>
+                        <p className={styles.supportingContentItemText}>{webPage.snippet}</p>
+                    </li>
+                );
+            })}
); }; diff --git a/app/frontend/src/locales/en/translation.json b/app/frontend/src/locales/en/translation.json index 07f657da8b..8222f030a6 100644 --- a/app/frontend/src/locales/en/translation.json +++ b/app/frontend/src/locales/en/translation.json @@ -118,7 +118,8 @@ }, "useOidSecurityFilter": "Use oid security filter", "useGroupsSecurityFilter": "Use groups security filter", - "shouldStream": "Stream chat completion responses" + "shouldStream": "Stream chat completion responses", + "useGroundingSearch": "Use Grounding search" }, "helpTexts": { @@ -151,6 +152,7 @@ "Sets the retrieval mode for the Azure AI Search query. `Vectors + Text (Hybrid)` uses a combination of vector search and full text search, `Vectors` uses only vector search, and `Text` uses only full text search. Hybrid is generally optimal.", "streamChat": "Continuously streams the response to the chat UI as it is generated.", "useOidSecurityFilter": "Filter search results based on the authenticated user's OID.", - "useGroupsSecurityFilter": "Filter search results based on the authenticated user's groups." + "useGroupsSecurityFilter": "Filter search results based on the authenticated user's groups.", + "useGroundingSearch": "Use Grounding search to ground search results." } } diff --git a/app/frontend/src/pages/ask/Ask.tsx b/app/frontend/src/pages/ask/Ask.tsx index 35137afb3d..6a023d5131 100644 --- a/app/frontend/src/pages/ask/Ask.tsx +++ b/app/frontend/src/pages/ask/Ask.tsx @@ -33,6 +33,7 @@ export function Component(): JSX.Element { const [useSemanticRanker, setUseSemanticRanker] = useState(true); const [useSemanticCaptions, setUseSemanticCaptions] = useState(false); const [useGPT4V, setUseGPT4V] = useState(false); + const [useGroundingSearch, setUseGroundingSearch] = useState(false); const [gpt4vInput, setGPT4VInput] = useState(GPT4VInput.TextAndImages); const [includeCategory, setIncludeCategory] = useState(""); const [excludeCategory, setExcludeCategory] = useState(""); @@ -48,6 +49,7 @@ export function Component(): JSX.Element { const [showSpeechInput, setShowSpeechInput] = useState(false); const [showSpeechOutputBrowser, setShowSpeechOutputBrowser] = useState(false); const [showSpeechOutputAzure, setShowSpeechOutputAzure] = useState(false); + const [showGroundingSearchOption, setShowGroundingSearchOption] = useState(false); const audio = useRef(new Audio()).current; const [isPlaying, setIsPlaying] = useState(false); @@ -79,6 +81,7 @@ export function Component(): JSX.Element { setUseSemanticRanker(config.showSemanticRankerOption); setShowSemanticRankerOption(config.showSemanticRankerOption); setShowVectorOption(config.showVectorOption); + setShowGroundingSearchOption(config.showGroundingSearchOption); if (!config.showVectorOption) { setRetrievalMode(RetrievalMode.Text); } @@ -132,6 +135,7 @@ export function Component(): JSX.Element { use_gpt4v: useGPT4V, gpt4v_input: gpt4vInput, language: i18n.language, + use_grounding_search: useGroundingSearch, ...(seed !== null ? 
{ seed: seed } : {}) } }, @@ -204,6 +208,9 @@ export function Component(): JSX.Element { case "retrievalMode": setRetrievalMode(value); break; + case "useGroundingSearch": + setUseGroundingSearch(value); + break; } }; @@ -325,11 +332,13 @@ export function Component(): JSX.Element { includeCategory={includeCategory} retrievalMode={retrievalMode} useGPT4V={useGPT4V} + useGroundingSearch={useGroundingSearch} gpt4vInput={gpt4vInput} vectorFieldList={vectorFieldList} showSemanticRankerOption={showSemanticRankerOption} showGPT4VOptions={showGPT4VOptions} showVectorOption={showVectorOption} + showGroundingSearchOption={showGroundingSearchOption} useOidSecurityFilter={useOidSecurityFilter} useGroupsSecurityFilter={useGroupsSecurityFilter} useLogin={!!useLogin} diff --git a/app/frontend/src/pages/chat/Chat.tsx b/app/frontend/src/pages/chat/Chat.tsx index e3c0cfd77f..a44c036bff 100644 --- a/app/frontend/src/pages/chat/Chat.tsx +++ b/app/frontend/src/pages/chat/Chat.tsx @@ -58,6 +58,7 @@ const Chat = () => { const [useGroupsSecurityFilter, setUseGroupsSecurityFilter] = useState(false); const [gpt4vInput, setGPT4VInput] = useState(GPT4VInput.TextAndImages); const [useGPT4V, setUseGPT4V] = useState(false); + const [useGroundingSearch, setUseGroundingSearch] = useState(false); const lastQuestionRef = useRef(""); const chatMessageStreamEnd = useRef(null); @@ -77,6 +78,7 @@ const Chat = () => { const [showGPT4VOptions, setShowGPT4VOptions] = useState(false); const [showSemanticRankerOption, setShowSemanticRankerOption] = useState(false); const [showVectorOption, setShowVectorOption] = useState(false); + const [showGroundingSearchOption, setShowGroundingSearchOption] = useState(false); const [showUserUpload, setShowUserUpload] = useState(false); const [showLanguagePicker, setshowLanguagePicker] = useState(false); const [showSpeechInput, setShowSpeechInput] = useState(false); @@ -111,6 +113,7 @@ const Chat = () => { setShowSpeechOutputAzure(config.showSpeechOutputAzure); setShowChatHistoryBrowser(config.showChatHistoryBrowser); setShowChatHistoryCosmos(config.showChatHistoryCosmos); + setShowGroundingSearchOption(config.showGroundingSearchOption); }); }; @@ -204,6 +207,7 @@ const Chat = () => { use_gpt4v: useGPT4V, gpt4v_input: gpt4vInput, language: i18n.language, + use_grounding_search: useGroundingSearch, ...(seed !== null ? { seed: seed } : {}) } }, @@ -318,6 +322,9 @@ const Chat = () => { case "retrievalMode": setRetrievalMode(value); break; + case "useGroundingSearch": + setUseGroundingSearch(value); + break; } }; @@ -504,11 +511,13 @@ const Chat = () => { includeCategory={includeCategory} retrievalMode={retrievalMode} useGPT4V={useGPT4V} + useGroundingSearch={useGroundingSearch} gpt4vInput={gpt4vInput} vectorFieldList={vectorFieldList} showSemanticRankerOption={showSemanticRankerOption} showGPT4VOptions={showGPT4VOptions} showVectorOption={showVectorOption} + showGroundingSearchOption={showGroundingSearchOption} useOidSecurityFilter={useOidSecurityFilter} useGroupsSecurityFilter={useGroupsSecurityFilter} useLogin={!!useLogin} diff --git a/docs/deploy_features.md b/docs/deploy_features.md index e6d1503d40..273d17ef5f 100644 --- a/docs/deploy_features.md +++ b/docs/deploy_features.md @@ -341,3 +341,21 @@ If you want to decrease the charges by using local parsers instead of Azure Docu 1. Run `azd env set USE_LOCAL_HTML_PARSER true` to use the local HTML parser. The local parsers will be used the next time you run the data ingestion script. 
To use these parsers for the user document upload system, you'll need to run `azd provision` to update the web app to use the local parsers. + +## Using Bing Search Grounding + +To enable Bing Search, first provision a Bing.Search API endpoint in the [Azure Portal](https://portal.azure.com/#create/Microsoft.BingSearch). You will need access to the Web Search API. The [F1 (free), S1, or S2 SKUS will be suitable](https://www.microsoft.com/bing/apis/pricing). + +Then enable the feature: + +```console +azd env set USE_BING_SEARCH true +``` + +Set the API key via the BING_SEARCH_API_KEY command: + +```console +azd env set BING_SEARCH_API_KEY +``` + +Note that Managed Identity is not available in Bing Search API. diff --git a/infra/main.bicep b/infra/main.bicep index 88d9a0e845..b36e0ce2ca 100644 --- a/infra/main.bicep +++ b/infra/main.bicep @@ -246,6 +246,11 @@ param useUserUpload bool = false param useLocalPdfParser bool = false param useLocalHtmlParser bool = false +@description('Use Bing search for web search grounding') +param useBingSearch bool = false +param bingSearchApiKey string = '' +param bingSearchEndpoint string = '' + var abbrs = loadJsonContent('abbreviations.json') var resourceToken = toLower(uniqueString(subscription().id, environmentName, location)) var tags = { 'azd-env-name': environmentName } @@ -424,6 +429,9 @@ var appEnvVariables = { USE_VECTORS: useVectors USE_GPT4V: useGPT4V USE_USER_UPLOAD: useUserUpload + USE_BING_SEARCH: useBingSearch + BING_SEARCH_API_KEY: bingSearchApiKey + BING_SEARCH_ENDPOINT: bingSearchEndpoint AZURE_USERSTORAGE_ACCOUNT: useUserUpload ? userStorage.outputs.name : '' AZURE_USERSTORAGE_CONTAINER: useUserUpload ? userStorageContainerName : '' AZURE_DOCUMENTINTELLIGENCE_SERVICE: documentIntelligence.outputs.name diff --git a/infra/main.parameters.json b/infra/main.parameters.json index 879e44b77c..d1fe6417dd 100644 --- a/infra/main.parameters.json +++ b/infra/main.parameters.json @@ -299,6 +299,15 @@ "useUserUpload": { "value": "${USE_USER_UPLOAD}" }, + "useBingSearch": { + "value": "${USE_BING_SEARCH}" + }, + "bingSearchApiKey": { + "value": "${BING_SEARCH_API_KEY}" + }, + "bingSearchEndpoint": { + "value": "${BING_SEARCH_ENDPOINT}" + }, "useLocalPdfParser": { "value": "${USE_LOCAL_PDF_PARSER}" }, diff --git a/tests/bing/example_result.json b/tests/bing/example_result.json new file mode 100644 index 0000000000..a2fca7361b --- /dev/null +++ b/tests/bing/example_result.json @@ -0,0 +1,249 @@ +{ + "_type": "SearchResponse", + "queryContext": { + "originalQuery": "Who won the 2024 NBA northern division?" + }, + "webPages": { + "webSearchUrl": "https:\\/\\/www.bing.com\\/search?q=Who+won+the+2024+NBA+northern+division%3f", + "totalEstimatedMatches": 237000, + "value": [ + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.0", + "name": "NBA Team Standings & Stats | NBA.com", + "url": "https:\\/\\/www.nba.com\\/standings?Season=2024-25&os=vb..&ref=app", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/www.nba.com\\/standings?Season=2024-25&os=vb..&ref=app", + "snippet": "NBA 2024-25 Regular Season Standings. Season ... 
Head-to-head won-lost percentage (2) Division leader wins tie from team not leading a division (3) Division won-lost percentage for teams in the ...", + "dateLastCrawled": "2025-01-21T07:08:00.0000000Z", + "language": "en", + "isNavigational": true, + "noCache": false, + "siteName": "NBA" + }, + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.1", + "name": "2024-25 NBA Standings: Division - FOX Sports", + "url": "https:\\/\\/www.foxsports.com\\/nba\\/standings?type=division", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/www.foxsports.com\\/nba\\/standings?type=division", + "snippet": "2024-25 nba division standings. atlantic. w-l pct gb pf pa home away conf div l10 strk 1. celtics 30-13 .698 - 117.7 108.1 ...", + "dateLastCrawled": "2025-01-20T16:38:00.0000000Z", + "language": "en", + "isNavigational": false, + "noCache": false, + "siteName": "Fox Sports" + }, + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.2", + "contractualRules": [ + { + "_type": "ContractualRules\\/LicenseAttribution", + "targetPropertyName": "snippet", + "targetPropertyIndex": 2, + "mustBeCloseToContent": true, + "license": { + "name": "CC-BY-SA", + "url": "http:\\/\\/creativecommons.org\\/licenses\\/by-sa\\/3.0\\/" + }, + "licenseNotice": "Text under CC-BY-SA licence" + } + ], + "name": "2024 NBA playoffs - Wikipedia", + "url": "https:\\/\\/en.wikipedia.org\\/wiki\\/2024_NBA_playoffs", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/en.wikipedia.org\\/wiki\\/2024_NBA_playoffs", + "snippet": "The 2024 NBA playoffs was the postseason tournament of the National Basketball Association's (NBA) 2023–24 season. ... and the numbers to the right indicate the number of games the team won in that round. The division champions are marked by an asterisk. Teams with home court advantage, the higher seeded team, are shown in italics.", + "dateLastCrawled": "2025-01-22T16:35:00.0000000Z", + "language": "en", + "isNavigational": false, + "noCache": false, + "siteName": "Wikipedia" + }, + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.3", + "name": "2024 NBA Playoffs | Official Bracket, Schedule and Series Matchups", + "url": "https:\\/\\/www.nba.com\\/playoffs\\/2024?os=win&ref=app", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/www.nba.com\\/playoffs\\/2024?os=win&ref=app", + "snippet": "The official site of the 2024 NBA Playoffs. Latest news, schedules, matchups, highlights, bracket and more. ... clinching title with a 4-1 series win. Horford finally champ after key sacrifice.", + "dateLastCrawled": "2025-01-17T08:53:00.0000000Z", + "language": "en", + "isNavigational": false, + "noCache": false, + "siteName": "NBA" + }, + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.4", + "name": "2024-2025 NBA Schedule and Scores: Full List - LandOfBasketball.com", + "url": "https:\\/\\/www.landofbasketball.com\\/results\\/2024_2025_scores_full.htm", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/www.landofbasketball.com\\/results\\/2024_2025_scores_full.htm", + "snippet": "All the results of NBA games played in the 2024-25 Season. Complete list with date, points scored, location and other information. October \\/ November \\/ December \\/ January \\/ February \\/ March \\/ April \\/ May \\/ June. 2024-25 NBA Schedule and Scores: Full List Full List - NBA Regular Season Oct. 
22, 2024:", + "dateLastCrawled": "2025-01-22T07:16:00.0000000Z", + "language": "en", + "isNavigational": false, + "noCache": false, + "siteName": "LandOfBasketball.com" + }, + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.5", + "name": "2024-2025 NBA Standings - plaintextsports.com", + "url": "https:\\/\\/plaintextsports.com\\/nba\\/2024-2025\\/standings", + "thumbnailUrl": "https:\\/\\/www.bing.com\\/th?id=OIP.XG7Vso_hVPpfTXTNNgr67gHaHa&w=80&h=80&c=1&pid=5.1", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/plaintextsports.com\\/nba\\/2024-2025\\/standings", + "snippet": "2024-2025 NBA Standings. Division Conference Overall NBA Cup. Eastern Conference. Atlantic: W-L PCT GB STRK L10 HOME AWAY. 1:BOS 30-13 .698 - W1 6-4 15-8 15-5. 2:NYK 28-16 .636 2.5 W1 4-6 14-8 14-8. ... What is the NBA Cup? Home games are in italics. Eastern Conference. East A:", + "dateLastCrawled": "2025-01-21T23:11:00.0000000Z", + "primaryImageOfPage": { + "thumbnailUrl": "https:\\/\\/www.bing.com\\/th?id=OIP.XG7Vso_hVPpfTXTNNgr67gHaHa&w=80&h=80&c=1&pid=5.1", + "width": 80, + "height": 80, + "sourceWidth": 474, + "sourceHeight": 474, + "imageId": "OIP.XG7Vso_hVPpfTXTNNgr67gHaHa" + }, + "language": "en", + "isNavigational": false, + "noCache": false + }, + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.6", + "name": "2023-24 NBA Standings - Basketball-Reference.com", + "url": "https:\\/\\/www.basketball-reference.com\\/leagues\\/NBA_2024_standings.html", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/www.basketball-reference.com\\/leagues\\/NBA_2024_standings.html", + "snippet": "Checkout the latest 2023-24 NBA Standing including Conference and Division Standings, Expanding Standings, Team vs Team Stats and more on Basketball-Reference.com. ... 2022-23 Standings 2024-25 Standings. League Champion: Boston Celtics.", + "dateLastCrawled": "2025-01-22T02:07:00.0000000Z", + "language": "en", + "isNavigational": false, + "noCache": false, + "siteName": "Basketball-Reference.com" + }, + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.7", + "name": "2024 NBA Playoffs Summary - Basketball-Reference.com", + "url": "https:\\/\\/www.basketball-reference.com\\/playoffs\\/NBA_2024.html", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/www.basketball-reference.com\\/playoffs\\/NBA_2024.html", + "snippet": "2024 NBA Playoffs Summary 2023 Playoffs Summary. League Champion: Boston Celtics. Finals MVP: Jaylen Brown (20.8 \\/ 5.4 \\/ 5.0) 2024 Playoff Leaders:", + "dateLastCrawled": "2025-01-21T08:09:00.0000000Z", + "language": "en", + "isNavigational": false, + "noCache": false, + "siteName": "Basketball-Reference.com" + }, + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.8", + "name": "NBA 2023-24 Regular Season Standings - LandOfBasketball.com", + "url": "https:\\/\\/www.landofbasketball.com\\/yearbyyear\\/2023_2024_standings.htm", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/www.landofbasketball.com\\/yearbyyear\\/2023_2024_standings.htm", + "snippet": "Complete 2023-2024 NBA season standings, with conference and division rank and teams that qualified for the playoffs. Land Of Basketball.com. Teams, players profiles, awards, stats, records and championships. Menu. NBA Seasons: 2023-2024 Season Final Standings. 
...", + "dateLastCrawled": "2025-01-21T00:40:00.0000000Z", + "language": "en", + "isNavigational": false, + "noCache": false, + "siteName": "LandOfBasketball.com" + }, + { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.9", + "name": "2024 NBA Finals | NBA.com", + "url": "https:\\/\\/www.nba.com\\/playoffs\\/2024\\/nba-finals?os=wtmbTQtAJk9ya&ref=app", + "thumbnailUrl": "https:\\/\\/www.bing.com\\/th?id=OIP.DoilHjZHaPBaU4LsW342SgHaEK&w=80&h=80&c=1&pid=5.1", + "isFamilyFriendly": true, + "displayUrl": "https:\\/\\/www.nba.com\\/playoffs\\/2024\\/nba-finals?os=wtmbTQtAJk9ya&ref=app", + "snippet": "Even with Kristaps Porzingis missing much of the 2024 NBA playoffs, the Celtics prove deep enough to emerge as champions. ... Tatum: Title win is '10x better than imagined' 00:58. Porzingis on ...", + "dateLastCrawled": "2025-01-22T11:02:00.0000000Z", + "primaryImageOfPage": { + "thumbnailUrl": "https:\\/\\/www.bing.com\\/th?id=OIP.DoilHjZHaPBaU4LsW342SgHaEK&w=80&h=80&c=1&pid=5.1", + "width": 80, + "height": 80, + "sourceWidth": 474, + "sourceHeight": 266, + "imageId": "OIP.DoilHjZHaPBaU4LsW342SgHaEK" + }, + "language": "en", + "isNavigational": false, + "noCache": false, + "siteName": "NBA" + } + ], + "someResultsRemoved": true + }, + "rankingResponse": { + "mainline": { + "items": [ + { + "answerType": "WebPages", + "resultIndex": 0, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.0" + } + }, + { + "answerType": "WebPages", + "resultIndex": 1, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.1" + } + }, + { + "answerType": "WebPages", + "resultIndex": 2, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.2" + } + }, + { + "answerType": "WebPages", + "resultIndex": 3, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.3" + } + }, + { + "answerType": "WebPages", + "resultIndex": 4, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.4" + } + }, + { + "answerType": "WebPages", + "resultIndex": 5, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.5" + } + }, + { + "answerType": "WebPages", + "resultIndex": 6, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.6" + } + }, + { + "answerType": "WebPages", + "resultIndex": 7, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.7" + } + }, + { + "answerType": "WebPages", + "resultIndex": 8, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.8" + } + }, + { + "answerType": "WebPages", + "resultIndex": 9, + "value": { + "id": "https:\\/\\/api.bing.microsoft.com\\/api\\/v7\\/#WebPages.9" + } + } + ] + } + } +} \ No newline at end of file