
Commit 01f4cf9

Auto-generated API code (#2626)
1 parent: 85dea32

3 files changed: +22 -2 lines changed

docs/reference.asciidoc: +4 -2
@@ -2444,6 +2444,8 @@ aggregation for its associated searches. You can retrieve these stats using
 the indices stats API.
 ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Blocks and waits until the search is completed up to a certain timeout.
 When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster.
+** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available.
+Ongoing async searches and any saved search results are deleted after this period.
 ** *`keep_on_completion` (Optional, boolean)*: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`.
 ** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
 ** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout
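
For context, a minimal sketch of how the new `keep_alive` parameter could be passed from the JS client; the index name and query are illustrative placeholders, not part of this change:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Submit an async search whose results stay retrievable for 5 minutes.
const submitted = await client.asyncSearch.submit({
  index: 'my-index',                 // placeholder index
  wait_for_completion_timeout: '2s', // block up to 2s for the search to finish
  keep_alive: '5m',                  // results are deleted after this period
  keep_on_completion: true,          // store results even if the search finishes within 2s
  query: { match_all: {} }
})

// `id` is only set when results were stored in the cluster.
if (submitted.id) {
  const status = await client.asyncSearch.status({ id: submitted.id })
  console.log(status.is_running)
}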
@@ -7692,7 +7694,7 @@ client.inference.put({ inference_id })
 * *Request (object):*
 ** *`inference_id` (string)*: The inference Id
 ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type
-** *`inference_config` (Optional, { service, service_settings, task_settings })*
+** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })*
 
 [discrete]
 ==== stream_inference
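
A sketch of how the widened `inference_config` might be passed to `inference.put`; the service name and settings below are assumptions for illustration, not a verified configuration:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const endpoint = await client.inference.put({
  inference_id: 'my-endpoint',
  task_type: 'sparse_embedding',
  inference_config: {
    service: 'elser',                          // assumed service for this sketch
    service_settings: { num_allocations: 1, num_threads: 1 },
    chunking_settings: {                       // newly accepted per the diff above
      strategy: 'sentence',                    // the generated type only constrains this to string
      max_chunk_size: 250,
      sentence_overlap: 1
    }
  }
})
console.log(endpoint.inference_id)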
@@ -7743,7 +7745,7 @@ client.inference.update({ inference_id })
 * *Request (object):*
 ** *`inference_id` (string)*: The unique identifier of the inference endpoint.
 ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of inference task that the model performs.
-** *`inference_config` (Optional, { service, service_settings, task_settings })*
+** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })*
 
 [discrete]
 === ingest
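
`inference.update` accepts the same widened shape; a sketch, assuming the endpoint created in the previous example already exists and that the word-based strategy pairs with `overlap` (values are illustrative):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.inference.update({
  inference_id: 'my-endpoint',
  inference_config: {
    service: 'elser',
    service_settings: { num_allocations: 1, num_threads: 1 },
    chunking_settings: {
      strategy: 'word',    // assumed strategy name
      max_chunk_size: 300,
      overlap: 100         // word-strategy overlap, per the new optional fields
    }
  }
})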

src/api/types.ts: +9
@@ -6683,6 +6683,7 @@ export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSea
 export interface AsyncSearchSubmitRequest extends RequestBase {
   index?: Indices
   wait_for_completion_timeout?: Duration
+  keep_alive?: Duration
   keep_on_completion?: boolean
   allow_no_indices?: boolean
   allow_partial_search_results?: boolean
@@ -12659,7 +12660,15 @@ export type InferenceDenseByteVector = byte[]
 
 export type InferenceDenseVector = float[]
 
+export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
+  max_chunk_size?: integer
+  overlap?: integer
+  sentence_overlap?: integer
+  strategy?: string
+}
+
 export interface InferenceInferenceEndpoint {
+  chunking_settings?: InferenceInferenceChunkingSettings
   service: string
   service_settings: InferenceServiceSettings
   task_settings?: InferenceTaskSettings
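
The same additions as seen from the generated declarations; a sketch of literals that should type-check against them, assuming `InferenceServiceSettings` remains a permissive alias that accepts an empty object:

import type {
  AsyncSearchSubmitRequest,
  InferenceInferenceChunkingSettings
} from '@elastic/elasticsearch/lib/api/types'

// The new optional Duration field on the submit request.
const submitRequest: AsyncSearchSubmitRequest = {
  index: 'my-index',
  wait_for_completion_timeout: '1s',
  keep_alive: '10m'
}

// InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint,
// so the inherited `service` and `service_settings` members are required here too.
const chunking: InferenceInferenceChunkingSettings = {
  strategy: 'sentence',   // plain string in the generated type
  max_chunk_size: 250,
  sentence_overlap: 1,
  service: 'elser',       // inherited, required
  service_settings: {}    // inherited, required
}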

src/api/typesWithBodyKey.ts: +9
@@ -6760,6 +6760,7 @@ export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSea
 export interface AsyncSearchSubmitRequest extends RequestBase {
   index?: Indices
   wait_for_completion_timeout?: Duration
+  keep_alive?: Duration
   keep_on_completion?: boolean
   allow_no_indices?: boolean
   allow_partial_search_results?: boolean
@@ -12899,7 +12900,15 @@ export type InferenceDenseByteVector = byte[]
 
 export type InferenceDenseVector = float[]
 
+export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
+  max_chunk_size?: integer
+  overlap?: integer
+  sentence_overlap?: integer
+  strategy?: string
+}
+
 export interface InferenceInferenceEndpoint {
+  chunking_settings?: InferenceInferenceChunkingSettings
   service: string
   service_settings: InferenceServiceSettings
   task_settings?: InferenceTaskSettings
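
typesWithBodyKey.ts mirrors the same two interfaces for the body-key request variants, so only the import path changes. A sketch under the same permissive-alias assumption as above:

import type { InferenceInferenceEndpoint } from '@elastic/elasticsearch/lib/api/typesWithBodyKey'

// An endpoint config carrying the new optional `chunking_settings` member.
const config: InferenceInferenceEndpoint = {
  service: 'elser',
  service_settings: {},
  chunking_settings: {
    strategy: 'sentence',
    max_chunk_size: 250,
    service: 'elser',     // required via the `extends InferenceInferenceEndpoint` chain
    service_settings: {}
  }
}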
