diff --git a/.gitignore b/.gitignore
index 2c2997cf..012aacfc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@ out/
 .DS_Store
 .terraform
 *for_tests.yaml
+.idea/
 
 # Files and directories to ignore in the site directory
 # dependencies
diff --git a/api/v1alpha1/api.go b/api/v1alpha1/api.go
index 6196fcf9..6067ec50 100644
--- a/api/v1alpha1/api.go
+++ b/api/v1alpha1/api.go
@@ -51,7 +51,7 @@ type LLMRouteSpec struct {
 	// Currently, the only supported schema is OpenAI as the input schema.
 	//
 	// +kubebuilder:validation:Required
-	// +kubebuilder:validation:XValidation:rule="self.schema == 'OpenAI'"
+	// +kubebuilder:validation:XValidation:rule="self.name == 'OpenAI'"
 	APISchema LLMAPISchema `json:"schema"`
 	// Rules is the list of LLMRouteRule that this LLMRoute will match the traffic to.
 	// Each rule is a subset of the HTTPRoute in the Gateway API (https://gateway-api.sigs.k8s.io/api-types/httproute/).
@@ -183,27 +183,27 @@ type LLMBackendSpec struct {
 // Note that this is vendor specific, and the stability of the API schema is not guaranteed by
 // the ai-gateway, but by the vendor via proper versioning.
 type LLMAPISchema struct {
-	// Schema is the API schema of the LLMRoute or LLMBackend.
+	// Name is the name of the API schema of the LLMRoute or LLMBackend.
 	//
 	// +kubebuilder:validation:Enum=OpenAI;AWSBedrock
-	Schema APISchema `json:"schema"`
+	Name APISchemaName `json:"name"`
 	// Version is the version of the API schema.
 	Version string `json:"version,omitempty"`
 }
 
-// APISchema defines the API schema.
-type APISchema string
+// APISchemaName defines the name of the API schema.
+type APISchemaName string
 
 const (
 	// APISchemaOpenAI is the OpenAI schema.
 	//
 	// https://github.com/openai/openai-openapi
-	APISchemaOpenAI APISchema = "OpenAI"
+	APISchemaOpenAI APISchemaName = "OpenAI"
 	// APISchemaAWSBedrock is the AWS Bedrock schema.
 	//
 	// https://docs.aws.amazon.com/bedrock/latest/APIReference/API_Operations_Amazon_Bedrock_Runtime.html
-	APISchemaAWSBedrock APISchema = "AWSBedrock"
+	APISchemaAWSBedrock APISchemaName = "AWSBedrock"
 )
 
 const (
diff --git a/filterconfig/filterconfig.go b/filterconfig/filterconfig.go
index 87c9024a..51e45561 100644
--- a/filterconfig/filterconfig.go
+++ b/filterconfig/filterconfig.go
@@ -20,7 +20,7 @@ import (
 // fallback when the configuration is not explicitly provided.
 const DefaultConfig = `
 schema:
-  schema: OpenAI
+  name: OpenAI
 selectedBackendHeaderKey: x-envoy-ai-gateway-selected-backend
 modelNameHeaderKey: x-envoy-ai-gateway-model
 `
@@ -30,7 +30,7 @@ modelNameHeaderKey: x-envoy-ai-gateway-model
 //	# Example configuration:
 //
 //	schema:
-//	  schema: OpenAI
+//	  name: OpenAI
 //	selectedBackendHeaderKey: x-envoy-ai-gateway-selected-backend
 //	modelNameHeaderKey: x-envoy-ai-gateway-model
 //	tokenUsageMetadata:
@@ -41,18 +41,18 @@ modelNameHeaderKey: x-envoy-ai-gateway-model
 //	  - name: kserve
 //	    weight: 1
 //	    schema:
-//	      schema: OpenAI
+//	      name: OpenAI
 //	  - name: awsbedrock
 //	    weight: 10
 //	    schema:
-//	      schema: AWSBedrock
+//	      name: AWSBedrock
 //	  headers:
 //	  - name: x-envoy-ai-gateway-model
 //	    value: llama3.3333
 //	- backends:
 //	  - name: openai
 //	    schema:
-//	      schema: OpenAI
+//	      name: OpenAI
 //	  headers:
 //	  - name: x-envoy-ai-gateway-model
 //	    value: gpt4.4444
@@ -96,18 +96,18 @@ type TokenUsageMetadata struct {
 // VersionedAPISchema corresponds to LLMAPISchema in api/v1alpha1/api.go.
 type VersionedAPISchema struct {
-	// Schema is the API schema.
-	Schema APISchema `yaml:"schema"`
+	// Name is the name of the API schema.
+	Name APISchemaName `yaml:"name"`
 	// Version is the version of the API schema. Optional.
 	Version string `yaml:"version,omitempty"`
 }
 
-// APISchema corresponds to APISchema in api/v1alpha1/api.go.
-type APISchema string
+// APISchemaName corresponds to APISchemaName in api/v1alpha1/api.go.
+type APISchemaName string
 
 const (
-	APISchemaOpenAI     APISchema = "OpenAI"
-	APISchemaAWSBedrock APISchema = "AWSBedrock"
+	APISchemaOpenAI     APISchemaName = "OpenAI"
+	APISchemaAWSBedrock APISchemaName = "AWSBedrock"
 )
 
 // HeaderMatch is an alias for HTTPHeaderMatch of the Gateway API.
diff --git a/filterconfig/filterconfig_test.go b/filterconfig/filterconfig_test.go
index e01c4308..7d35618d 100644
--- a/filterconfig/filterconfig_test.go
+++ b/filterconfig/filterconfig_test.go
@@ -30,7 +30,7 @@ func TestUnmarshalConfigYaml(t *testing.T) {
 	configPath := path.Join(t.TempDir(), "config.yaml")
 	const config = `
 schema:
-  schema: OpenAI
+  name: OpenAI
 selectedBackendHeaderKey: x-envoy-ai-gateway-selected-backend
 modelNameHeaderKey: x-envoy-ai-gateway-model
 tokenUsageMetadata:
@@ -41,18 +41,18 @@ rules:
   - name: kserve
     weight: 1
     schema:
-      schema: OpenAI
+      name: OpenAI
   - name: awsbedrock
     weight: 10
     schema:
-      schema: AWSBedrock
+      name: AWSBedrock
   headers:
   - name: x-envoy-ai-gateway-model
     value: llama3.3333
 - backends:
   - name: openai
     schema:
-      schema: OpenAI
+      name: OpenAI
   headers:
   - name: x-envoy-ai-gateway-model
     value: gpt4.4444
@@ -62,7 +62,7 @@ rules:
 	require.NoError(t, err)
 	require.Equal(t, "ai_gateway_llm_ns", cfg.TokenUsageMetadata.Namespace)
 	require.Equal(t, "token_usage_key", cfg.TokenUsageMetadata.Key)
-	require.Equal(t, "OpenAI", string(cfg.Schema.Schema))
+	require.Equal(t, "OpenAI", string(cfg.Schema.Name))
 	require.Equal(t, "x-envoy-ai-gateway-selected-backend", cfg.SelectedBackendHeaderKey)
 	require.Equal(t, "x-envoy-ai-gateway-model", cfg.ModelNameHeaderKey)
 	require.Len(t, cfg.Rules, 2)
@@ -70,7 +70,7 @@ rules:
 	require.Equal(t, "gpt4.4444", cfg.Rules[1].Headers[0].Value)
 	require.Equal(t, "kserve", cfg.Rules[0].Backends[0].Name)
 	require.Equal(t, 10, cfg.Rules[0].Backends[1].Weight)
-	require.Equal(t, "AWSBedrock", string(cfg.Rules[0].Backends[1].Schema.Schema))
+	require.Equal(t, "AWSBedrock", string(cfg.Rules[0].Backends[1].Schema.Name))
 	require.Equal(t, "openai", cfg.Rules[1].Backends[0].Name)
-	require.Equal(t, "OpenAI", string(cfg.Rules[1].Backends[0].Schema.Schema))
+	require.Equal(t, "OpenAI", string(cfg.Rules[1].Backends[0].Schema.Name))
 }
diff --git a/internal/controller/sink.go b/internal/controller/sink.go
index 145e44a5..5b45732f 100644
--- a/internal/controller/sink.go
+++ b/internal/controller/sink.go
@@ -226,7 +226,7 @@ func (c *configSink) updateExtProcConfigMap(llmRoute *aigv1a1.LLMRoute) error {
 	ec := &filterconfig.Config{}
 	spec := &llmRoute.Spec
 
-	ec.Schema.Schema = filterconfig.APISchema(spec.APISchema.Schema)
+	ec.Schema.Name = filterconfig.APISchemaName(spec.APISchema.Name)
 	ec.Schema.Version = spec.APISchema.Version
 	ec.ModelNameHeaderKey = aigv1a1.LLMModelHeaderKey
 	ec.SelectedBackendHeaderKey = selectedBackendHeaderKey
@@ -242,7 +242,7 @@ func (c *configSink) updateExtProcConfigMap(llmRoute *aigv1a1.LLMRoute) error {
 				err = fmt.Errorf("backend %s not found", key)
 				return err
 			} else {
-				ec.Rules[i].Backends[j].Schema.Schema = filterconfig.APISchema(backendObj.Spec.APISchema.Schema)
+				ec.Rules[i].Backends[j].Schema.Name = filterconfig.APISchemaName(backendObj.Spec.APISchema.Name)
 				ec.Rules[i].Backends[j].Schema.Version = backendObj.Spec.APISchema.Version
 			}
 		}
diff --git a/internal/controller/sink_test.go b/internal/controller/sink_test.go
index 5e37f861..d8d8e3c7 100644
--- a/internal/controller/sink_test.go
+++ b/internal/controller/sink_test.go
@@ -78,7 +78,7 @@ func TestConfigSink_init(t *testing.T) {
 				BackendRef: egv1a1.BackendRef{
 					BackendObjectReference: gwapiv1.BackendObjectReference{Name: "some-backend1", Namespace: ptr.To[gwapiv1.Namespace]("ns1")},
 				},
-				APISchema: aigv1a1.LLMAPISchema{Schema: aigv1a1.APISchemaOpenAI},
+				APISchema: aigv1a1.LLMAPISchema{Name: aigv1a1.APISchemaOpenAI},
 			},
 		},
 		{
@@ -87,7 +87,7 @@ func TestConfigSink_init(t *testing.T) {
 				BackendRef: egv1a1.BackendRef{
 					BackendObjectReference: gwapiv1.BackendObjectReference{Name: "some-backend2", Namespace: ptr.To[gwapiv1.Namespace]("ns1")},
 				},
-				APISchema: aigv1a1.LLMAPISchema{Schema: aigv1a1.APISchemaAWSBedrock},
+				APISchema: aigv1a1.LLMAPISchema{Name: aigv1a1.APISchemaAWSBedrock},
 			},
 		},
 		{
@@ -96,7 +96,7 @@ func TestConfigSink_init(t *testing.T) {
 				BackendRef: egv1a1.BackendRef{
 					BackendObjectReference: gwapiv1.BackendObjectReference{Name: "some-backend3", Namespace: ptr.To[gwapiv1.Namespace]("ns1")},
 				},
-				APISchema: aigv1a1.LLMAPISchema{Schema: aigv1a1.APISchemaOpenAI},
+				APISchema: aigv1a1.LLMAPISchema{Name: aigv1a1.APISchemaOpenAI},
 			},
 		},
 		{
@@ -105,7 +105,7 @@ func TestConfigSink_init(t *testing.T) {
 				BackendRef: egv1a1.BackendRef{
 					BackendObjectReference: gwapiv1.BackendObjectReference{Name: "some-backend4", Namespace: ptr.To[gwapiv1.Namespace]("ns1")},
 				},
-				APISchema: aigv1a1.LLMAPISchema{Schema: aigv1a1.APISchemaOpenAI},
+				APISchema: aigv1a1.LLMAPISchema{Name: aigv1a1.APISchemaOpenAI},
 			},
 		},
 	} {
@@ -212,7 +212,7 @@ func TestConfigSink_syncLLMRoute(t *testing.T) {
 					BackendRefs: []aigv1a1.LLMRouteRuleBackendRef{{Name: "apple", Weight: 1}, {Name: "orange", Weight: 1}},
 				},
 			},
-			APISchema: aigv1a1.LLMAPISchema{Schema: aigv1a1.APISchemaOpenAI, Version: "v123"},
+			APISchema: aigv1a1.LLMAPISchema{Name: aigv1a1.APISchemaOpenAI, Version: "v123"},
 		},
 	}
 	err := fakeClient.Create(context.Background(), route, &client.CreateOptions{})
@@ -389,7 +389,7 @@ func Test_updateExtProcConfigMap(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{Name: "apple", Namespace: "ns"},
 			Spec: aigv1a1.LLMBackendSpec{
 				APISchema: aigv1a1.LLMAPISchema{
-					Schema: aigv1a1.APISchemaAWSBedrock,
+					Name: aigv1a1.APISchemaAWSBedrock,
 				},
 				BackendRef: egv1a1.BackendRef{
 					BackendObjectReference: gwapiv1.BackendObjectReference{Name: "some-backend1", Namespace: ptr.To[gwapiv1.Namespace]("ns")},
@@ -425,7 +425,7 @@ func Test_updateExtProcConfigMap(t *testing.T) {
 			route: &aigv1a1.LLMRoute{
 				ObjectMeta: metav1.ObjectMeta{Name: "myroute", Namespace: "ns"},
 				Spec: aigv1a1.LLMRouteSpec{
-					APISchema: aigv1a1.LLMAPISchema{Schema: aigv1a1.APISchemaOpenAI, Version: "v123"},
+					APISchema: aigv1a1.LLMAPISchema{Name: aigv1a1.APISchemaOpenAI, Version: "v123"},
 					Rules: []aigv1a1.LLMRouteRule{
 						{
 							BackendRefs: []aigv1a1.LLMRouteRuleBackendRef{
@@ -446,13 +446,13 @@ func Test_updateExtProcConfigMap(t *testing.T) {
 				},
 			},
 			exp: &filterconfig.Config{
-				Schema: filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI, Version: "v123"},
+				Schema: filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI, Version: "v123"},
 				ModelNameHeaderKey: aigv1a1.LLMModelHeaderKey,
 				SelectedBackendHeaderKey: selectedBackendHeaderKey,
 				Rules: []filterconfig.RouteRule{
 					{
 						Backends: []filterconfig.Backend{
-							{Name: "apple.ns", Weight: 1, Schema: filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaAWSBedrock}}, {Name: "pineapple.ns", Weight: 2},
+							{Name: "apple.ns", Weight: 1, Schema: filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaAWSBedrock}}, {Name: "pineapple.ns", Weight: 2},
 						},
 						Headers: []filterconfig.HeaderMatch{{Name: aigv1a1.LLMModelHeaderKey, Value: "some-ai"}},
 					},
diff --git a/internal/extproc/processor_test.go b/internal/extproc/processor_test.go
index 5363797b..f21f7586 100644
--- a/internal/extproc/processor_test.go
+++ b/internal/extproc/processor_test.go
@@ -95,7 +95,7 @@ func TestProcessor_ProcessRequestBody(t *testing.T) {
 		rbp := mockRequestBodyParser{t: t, retModelName: "some-model", expPath: "/foo"}
 		rt := mockRouter{
 			t: t, expHeaders: headers, retBackendName: "some-backend",
-			retVersionedAPISchema: filterconfig.VersionedAPISchema{Schema: "some-schema", Version: "v10.0"},
+			retVersionedAPISchema: filterconfig.VersionedAPISchema{Name: "some-schema", Version: "v10.0"},
 		}
 		p := &Processor{config: &processorConfig{
 			bodyParser: rbp.impl, router: rt,
@@ -109,13 +109,13 @@ func TestProcessor_ProcessRequestBody(t *testing.T) {
 		rbp := mockRequestBodyParser{t: t, retModelName: "some-model", expPath: "/foo"}
 		rt := mockRouter{
 			t: t, expHeaders: headers, retBackendName: "some-backend",
-			retVersionedAPISchema: filterconfig.VersionedAPISchema{Schema: "some-schema", Version: "v10.0"},
+			retVersionedAPISchema: filterconfig.VersionedAPISchema{Name: "some-schema", Version: "v10.0"},
 		}
 		factory := mockTranslatorFactory{t: t, retErr: errors.New("test error"), expPath: "/foo"}
 		p := &Processor{config: &processorConfig{
 			bodyParser: rbp.impl, router: rt,
 			factories: map[filterconfig.VersionedAPISchema]translator.Factory{
-				{Schema: "some-schema", Version: "v10.0"}: factory.impl,
+				{Name: "some-schema", Version: "v10.0"}: factory.impl,
 			},
 		}, requestHeaders: headers}
 		_, err := p.ProcessRequestBody(context.Background(), &extprocv3.HttpBody{})
@@ -126,13 +126,13 @@ func TestProcessor_ProcessRequestBody(t *testing.T) {
 		rbp := mockRequestBodyParser{t: t, retModelName: "some-model", expPath: "/foo"}
 		rt := mockRouter{
 			t: t, expHeaders: headers, retBackendName: "some-backend",
-			retVersionedAPISchema: filterconfig.VersionedAPISchema{Schema: "some-schema", Version: "v10.0"},
+			retVersionedAPISchema: filterconfig.VersionedAPISchema{Name: "some-schema", Version: "v10.0"},
 		}
 		factory := mockTranslatorFactory{t: t, retTranslator: mockTranslator{t: t, retErr: errors.New("test error")}, expPath: "/foo"}
 		p := &Processor{config: &processorConfig{
 			bodyParser: rbp.impl, router: rt,
 			factories: map[filterconfig.VersionedAPISchema]translator.Factory{
-				{Schema: "some-schema", Version: "v10.0"}: factory.impl,
+				{Name: "some-schema", Version: "v10.0"}: factory.impl,
 			},
 		}, requestHeaders: headers}
 		_, err := p.ProcessRequestBody(context.Background(), &extprocv3.HttpBody{})
@@ -144,7 +144,7 @@ func TestProcessor_ProcessRequestBody(t *testing.T) {
 		rbp := mockRequestBodyParser{t: t, retModelName: "some-model", expPath: "/foo", retRb: someBody}
 		rt := mockRouter{
 			t: t, expHeaders: headers, retBackendName: "some-backend",
-			retVersionedAPISchema: filterconfig.VersionedAPISchema{Schema: "some-schema", Version: "v10.0"},
+			retVersionedAPISchema: filterconfig.VersionedAPISchema{Name: "some-schema", Version: "v10.0"},
 		}
 		headerMut := &extprocv3.HeaderMutation{}
 		bodyMut := &extprocv3.BodyMutation{}
@@ -153,7 +153,7 @@ func TestProcessor_ProcessRequestBody(t *testing.T) {
 		p := &Processor{config: &processorConfig{
 			bodyParser: rbp.impl, router: rt,
 			factories: map[filterconfig.VersionedAPISchema]translator.Factory{
-				{Schema: "some-schema", Version: "v10.0"}: factory.impl,
+				{Name: "some-schema", Version: "v10.0"}: factory.impl,
 			},
 			selectedBackendHeaderKey: "x-ai-gateway-backend-key",
 			ModelNameHeaderKey: "x-ai-gateway-model-key",
diff --git a/internal/extproc/router/request_body.go b/internal/extproc/router/request_body.go
index 91661420..d1361fac 100644
--- a/internal/extproc/router/request_body.go
+++ b/internal/extproc/router/request_body.go
@@ -15,7 +15,7 @@ type RequestBodyParser func(path string, body *extprocv3.HttpBody) (modelName st
 
 // NewRequestBodyParser creates a new RequestBodyParser based on the schema.
 func NewRequestBodyParser(schema filterconfig.VersionedAPISchema) (RequestBodyParser, error) {
-	if schema.Schema == filterconfig.APISchemaOpenAI {
+	if schema.Name == filterconfig.APISchemaOpenAI {
 		return openAIParseBody, nil
 	}
 	return nil, fmt.Errorf("unsupported API schema: %s", schema)
diff --git a/internal/extproc/router/request_body_test.go b/internal/extproc/router/request_body_test.go
index d5e4049e..e42469e9 100644
--- a/internal/extproc/router/request_body_test.go
+++ b/internal/extproc/router/request_body_test.go
@@ -13,12 +13,12 @@ import (
 
 func TestNewRequestBodyParser(t *testing.T) {
 	t.Run("ok", func(t *testing.T) {
-		res, err := NewRequestBodyParser(filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI})
+		res, err := NewRequestBodyParser(filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI})
 		require.NotNil(t, res)
 		require.NoError(t, err)
 	})
 	t.Run("error", func(t *testing.T) {
-		res, err := NewRequestBodyParser(filterconfig.VersionedAPISchema{Schema: "foo"})
+		res, err := NewRequestBodyParser(filterconfig.VersionedAPISchema{Name: "foo"})
 		require.Nil(t, res)
 		require.Error(t, err)
 	})
diff --git a/internal/extproc/router/router_test.go b/internal/extproc/router/router_test.go
index cc56a0b9..5d0095bd 100644
--- a/internal/extproc/router/router_test.go
+++ b/internal/extproc/router/router_test.go
@@ -9,7 +9,7 @@ import (
 )
 
 func TestRouter_Calculate(t *testing.T) {
-	outSchema := filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI}
+	outSchema := filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI}
 	_r, err := NewRouter(&filterconfig.Config{
 		Rules: []filterconfig.RouteRule{
 			{
@@ -68,7 +68,7 @@ func TestRouter_selectBackendFromRule(t *testing.T) {
 	r, ok := _r.(*router)
 	require.True(t, ok)
 
-	outSchema := filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI}
+	outSchema := filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI}
 
 	rule := &filterconfig.RouteRule{
 		Backends: []filterconfig.Backend{
diff --git a/internal/extproc/server_test.go b/internal/extproc/server_test.go
index bbcaac56..4c352ca0 100644
--- a/internal/extproc/server_test.go
+++ b/internal/extproc/server_test.go
@@ -29,7 +29,7 @@ func TestServer_LoadConfig(t *testing.T) {
 	t.Run("invalid input schema", func(t *testing.T) {
 		s := requireNewServerWithMockProcessor(t)
 		err := s.LoadConfig(&filterconfig.Config{
-			Schema: filterconfig.VersionedAPISchema{Schema: "some-invalid-schema"},
+			Schema: filterconfig.VersionedAPISchema{Name: "some-invalid-schema"},
 		})
 		require.Error(t, err)
 		require.ErrorContains(t, err, "cannot create request body parser")
@@ -37,14 +37,14 @@ func TestServer_LoadConfig(t *testing.T) {
 	t.Run("ok", func(t *testing.T) {
 		config := &filterconfig.Config{
 			TokenUsageMetadata: &filterconfig.TokenUsageMetadata{Namespace: "ns", Key: "key"},
-			Schema: filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI},
+			Schema: filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI},
 			SelectedBackendHeaderKey: "x-envoy-ai-gateway-selected-backend",
 			ModelNameHeaderKey: "x-model-name",
 			Rules: []filterconfig.RouteRule{
 				{
 					Backends: []filterconfig.Backend{
-						{Name: "kserve", Schema: filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI}},
-						{Name: "awsbedrock", Schema: filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaAWSBedrock}},
+						{Name: "kserve", Schema: filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI}},
+						{Name: "awsbedrock", Schema: filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaAWSBedrock}},
 					},
 					Headers: []filterconfig.HeaderMatch{
 						{
@@ -55,7 +55,7 @@ func TestServer_LoadConfig(t *testing.T) {
 				},
 				{
 					Backends: []filterconfig.Backend{
-						{Name: "openai", Schema: filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI}},
+						{Name: "openai", Schema: filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI}},
 					},
 					Headers: []filterconfig.HeaderMatch{
 						{
@@ -79,8 +79,8 @@ func TestServer_LoadConfig(t *testing.T) {
 		require.Equal(t, "x-envoy-ai-gateway-selected-backend", s.config.selectedBackendHeaderKey)
 		require.Equal(t, "x-model-name", s.config.ModelNameHeaderKey)
 		require.Len(t, s.config.factories, 2)
-		require.NotNil(t, s.config.factories[filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI}])
-		require.NotNil(t, s.config.factories[filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaAWSBedrock}])
+		require.NotNil(t, s.config.factories[filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI}])
+		require.NotNil(t, s.config.factories[filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaAWSBedrock}])
 	})
 }
diff --git a/internal/extproc/translator/translator.go b/internal/extproc/translator/translator.go
index 98b8095d..84265dbc 100644
--- a/internal/extproc/translator/translator.go
+++ b/internal/extproc/translator/translator.go
@@ -20,9 +20,9 @@ type Factory func(path string) (Translator, error)
 
 // NewFactory returns a callback function that creates a translator for the given API schema combination.
 func NewFactory(in, out filterconfig.VersionedAPISchema) (Factory, error) {
-	if in.Schema == filterconfig.APISchemaOpenAI {
+	if in.Name == filterconfig.APISchemaOpenAI {
 		// TODO: currently, we ignore the LLMAPISchema."Version" field.
-		switch out.Schema {
+		switch out.Name {
 		case filterconfig.APISchemaOpenAI:
 			return newOpenAIToOpenAITranslator, nil
 		case filterconfig.APISchemaAWSBedrock:
diff --git a/internal/extproc/translator/translator_test.go b/internal/extproc/translator/translator_test.go
index f14b8f71..64c42023 100644
--- a/internal/extproc/translator/translator_test.go
+++ b/internal/extproc/translator/translator_test.go
@@ -11,15 +11,15 @@ import (
 func TestNewFactory(t *testing.T) {
 	t.Run("error", func(t *testing.T) {
 		_, err := NewFactory(
-			filterconfig.VersionedAPISchema{Schema: "Foo", Version: "v100"},
-			filterconfig.VersionedAPISchema{Schema: "Bar", Version: "v123"},
+			filterconfig.VersionedAPISchema{Name: "Foo", Version: "v100"},
+			filterconfig.VersionedAPISchema{Name: "Bar", Version: "v123"},
 		)
 		require.ErrorContains(t, err, "unsupported API schema combination: client={Foo v100}, backend={Bar v123}")
 	})
 	t.Run("openai to openai", func(t *testing.T) {
 		f, err := NewFactory(
-			filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI},
-			filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI},
+			filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI},
+			filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI},
 		)
 		require.NoError(t, err)
 		require.NotNil(t, f)
@@ -32,8 +32,8 @@ func TestNewFactory(t *testing.T) {
 	})
 	t.Run("openai to aws bedrock", func(t *testing.T) {
 		f, err := NewFactory(
-			filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI},
-			filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaAWSBedrock},
+			filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI},
+			filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaAWSBedrock},
 		)
 		require.NoError(t, err)
 		require.NotNil(t, f)
diff --git a/manifests/charts/ai-gateway-helm/crds/aigateway.envoyproxy.io_llmbackends.yaml b/manifests/charts/ai-gateway-helm/crds/aigateway.envoyproxy.io_llmbackends.yaml
index 2cb24f0d..02d9aca4 100644
--- a/manifests/charts/ai-gateway-helm/crds/aigateway.envoyproxy.io_llmbackends.yaml
+++ b/manifests/charts/ai-gateway-helm/crds/aigateway.envoyproxy.io_llmbackends.yaml
@@ -169,8 +169,9 @@ spec:
                 This is required to be set.
 
               properties:
-                schema:
-                  description: Schema is the API schema of the LLMRoute or LLMBackend.
+                name:
+                  description: Name is the name of the API schema of the LLMRoute
+                    or LLMBackend.
                   enum:
                   - OpenAI
                   - AWSBedrock
@@ -179,7 +180,7 @@ spec:
                   description: Version is the version of the API schema.
                   type: string
               required:
-              - schema
+              - name
              type: object
            required:
            - backendRef
diff --git a/manifests/charts/ai-gateway-helm/crds/aigateway.envoyproxy.io_llmroutes.yaml b/manifests/charts/ai-gateway-helm/crds/aigateway.envoyproxy.io_llmroutes.yaml
index 28870e63..0ddea5da 100644
--- a/manifests/charts/ai-gateway-helm/crds/aigateway.envoyproxy.io_llmroutes.yaml
+++ b/manifests/charts/ai-gateway-helm/crds/aigateway.envoyproxy.io_llmroutes.yaml
@@ -183,8 +183,9 @@ spec:
                 Currently, the only supported schema is OpenAI as the input schema.
 
               properties:
-                schema:
-                  description: Schema is the API schema of the LLMRoute or LLMBackend.
+                name:
+                  description: Name is the name of the API schema of the LLMRoute
+                    or LLMBackend.
                   enum:
                   - OpenAI
                   - AWSBedrock
@@ -193,10 +194,10 @@ spec:
                   description: Version is the version of the API schema.
                   type: string
               required:
-              - schema
+              - name
              type: object
              x-kubernetes-validations:
-             - rule: self.schema == 'OpenAI'
+             - rule: self.name == 'OpenAI'
            targetRefs:
              description: TargetRefs are the names of the Gateway resources this
                LLMRoute is being attached to.
diff --git a/tests/cel-validation/main_test.go b/tests/cel-validation/main_test.go
index 0720caa8..5938be71 100644
--- a/tests/cel-validation/main_test.go
+++ b/tests/cel-validation/main_test.go
@@ -31,11 +31,11 @@ func TestLLMRoutes(t *testing.T) {
 		{name: "basic.yaml"},
 		{
 			name: "non_openai_schema.yaml",
-			expErr: `spec.schema: Invalid value: "object": failed rule: self.schema == 'OpenAI'`,
+			expErr: `spec.schema: Invalid value: "object": failed rule: self.name == 'OpenAI'`,
 		},
 		{
 			name: "unknown_schema.yaml",
-			expErr: "spec.schema.schema: Unsupported value: \"SomeRandomVendor\": supported values: \"OpenAI\", \"AWSBedrock\"",
+			expErr: "spec.schema.name: Unsupported value: \"SomeRandomVendor\": supported values: \"OpenAI\", \"AWSBedrock\"",
 		},
 		{
 			name: "unsupported_match.yaml",
@@ -74,7 +74,7 @@ func TestLLMBackends(t *testing.T) {
 		{name: "basic-eg-backend.yaml"},
 		{
 			name: "unknown_schema.yaml",
-			expErr: "spec.schema.schema: Unsupported value: \"SomeRandomVendor\": supported values: \"OpenAI\", \"AWSBedrock\"",
+			expErr: "spec.schema.name: Unsupported value: \"SomeRandomVendor\": supported values: \"OpenAI\", \"AWSBedrock\"",
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
diff --git a/tests/cel-validation/testdata/llmbackends/basic-eg-backend.yaml b/tests/cel-validation/testdata/llmbackends/basic-eg-backend.yaml
index c2ece99b..3899ae86 100644
--- a/tests/cel-validation/testdata/llmbackends/basic-eg-backend.yaml
+++ b/tests/cel-validation/testdata/llmbackends/basic-eg-backend.yaml
@@ -5,7 +5,7 @@ metadata:
   namespace: default
 spec:
   schema:
-    schema: AWSBedrock
+    name: AWSBedrock
   backendRef:
     name: eg-backend
     kind: Backend
diff --git a/tests/cel-validation/testdata/llmbackends/basic.yaml b/tests/cel-validation/testdata/llmbackends/basic.yaml
index a305ea08..2eb84a72 100644
--- a/tests/cel-validation/testdata/llmbackends/basic.yaml
+++ b/tests/cel-validation/testdata/llmbackends/basic.yaml
@@ -5,7 +5,7 @@ metadata:
   namespace: default
 spec:
   schema:
-    schema: AWSBedrock
+    name: AWSBedrock
   backendRef:
     name: dog-service
     kind: Service
diff --git a/tests/cel-validation/testdata/llmbackends/unknown_schema.yaml b/tests/cel-validation/testdata/llmbackends/unknown_schema.yaml
index f7ddb28a..ff10bc3f 100644
--- a/tests/cel-validation/testdata/llmbackends/unknown_schema.yaml
+++ b/tests/cel-validation/testdata/llmbackends/unknown_schema.yaml
@@ -5,5 +5,5 @@ metadata:
   namespace: default
 spec:
   schema:
-    # Schema must be one of the known schemas, so this is invalid.
-    schema: SomeRandomVendor
+    # Name must be one of the known schemas, so this is invalid.
+    name: SomeRandomVendor
diff --git a/tests/cel-validation/testdata/llmroutes/basic.yaml b/tests/cel-validation/testdata/llmroutes/basic.yaml
index c85a24e8..776f8dac 100644
--- a/tests/cel-validation/testdata/llmroutes/basic.yaml
+++ b/tests/cel-validation/testdata/llmroutes/basic.yaml
@@ -5,7 +5,7 @@ metadata:
   namespace: default
 spec:
   schema:
-    schema: OpenAI
+    name: OpenAI
   rules:
   - matches:
     - headers:
diff --git a/tests/cel-validation/testdata/llmroutes/non_openai_schema.yaml b/tests/cel-validation/testdata/llmroutes/non_openai_schema.yaml
index 570ba11a..95823d80 100644
--- a/tests/cel-validation/testdata/llmroutes/non_openai_schema.yaml
+++ b/tests/cel-validation/testdata/llmroutes/non_openai_schema.yaml
@@ -5,8 +5,8 @@ metadata:
   namespace: default
 spec:
   schema:
-    # Input must be OpenAI schema at the moment, so this is invalid.
-    schema: AWSBedrock
+    # Schema name must be OpenAI schema at the moment, so this is invalid.
+    name: AWSBedrock
   rules:
   - matches:
     - headers:
diff --git a/tests/cel-validation/testdata/llmroutes/unknown_schema.yaml b/tests/cel-validation/testdata/llmroutes/unknown_schema.yaml
index 2a360b91..6b0d9cf5 100644
--- a/tests/cel-validation/testdata/llmroutes/unknown_schema.yaml
+++ b/tests/cel-validation/testdata/llmroutes/unknown_schema.yaml
@@ -5,8 +5,8 @@ metadata:
   namespace: default
 spec:
   schema:
-    # Schema must be OpenAI schema at the moment, so this is invalid.
-    schema: SomeRandomVendor
+    # Schema name must be OpenAI schema at the moment, so this is invalid.
+    name: SomeRandomVendor
   rules:
   - matches:
     - headers:
diff --git a/tests/cel-validation/testdata/llmroutes/unsupported_match.yaml b/tests/cel-validation/testdata/llmroutes/unsupported_match.yaml
index 5b4e77f7..e93abbfc 100644
--- a/tests/cel-validation/testdata/llmroutes/unsupported_match.yaml
+++ b/tests/cel-validation/testdata/llmroutes/unsupported_match.yaml
@@ -5,7 +5,7 @@ metadata:
   namespace: default
 spec:
   schema:
-    schema: OpenAI
+    name: OpenAI
   rules:
   - matches:
     - headers:
diff --git a/tests/controller/controller_test.go b/tests/controller/controller_test.go
index 9072ef9f..72f30c51 100644
--- a/tests/controller/controller_test.go
+++ b/tests/controller/controller_test.go
@@ -17,7 +17,7 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/klog/v2"
+	klog "k8s.io/klog/v2"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -30,7 +30,7 @@ import (
 	"github.com/envoyproxy/ai-gateway/tests"
 )
 
-var defaultSchema = aigv1a1.LLMAPISchema{Schema: aigv1a1.APISchemaOpenAI, Version: "v1"}
+var defaultSchema = aigv1a1.LLMAPISchema{Name: aigv1a1.APISchemaOpenAI, Version: "v1"}
 
 func extProcName(llmRouteName string) string {
 	return fmt.Sprintf("ai-gateway-llm-route-extproc-%s", llmRouteName)
diff --git a/tests/extproc/extproc_test.go b/tests/extproc/extproc_test.go
index 05c3e971..673205d8 100644
--- a/tests/extproc/extproc_test.go
+++ b/tests/extproc/extproc_test.go
@@ -17,7 +17,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/openai/openai-go"
+	openai "github.com/openai/openai-go"
 	"github.com/openai/openai-go/option"
 	"github.com/stretchr/testify/require"
 	"sigs.k8s.io/yaml"
@@ -29,8 +29,8 @@ import (
 var envoyYamlBase string
 
 var (
-	openAISchema     = filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaOpenAI}
-	awsBedrockSchema = filterconfig.VersionedAPISchema{Schema: filterconfig.APISchemaAWSBedrock}
+	openAISchema     = filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaOpenAI}
+	awsBedrockSchema = filterconfig.VersionedAPISchema{Name: filterconfig.APISchemaAWSBedrock}
 )
 
 // TestE2E tests the end-to-end flow of the external processor with Envoy.
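
For reference, after this rename the standalone filter configuration identifies an API schema by name rather than by a nested schema key. The following is a minimal sketch assembled from DefaultConfig and the documented example in filterconfig/filterconfig.go; the backend name, header key, and model value are taken from that example, and the inline comments are explanatory only:

# Filter configuration after the rename (sketch based on the example in filterconfig.go).
schema:
  name: OpenAI          # input schema of the route; formerly "schema: OpenAI"
selectedBackendHeaderKey: x-envoy-ai-gateway-selected-backend
modelNameHeaderKey: x-envoy-ai-gateway-model
rules:
- backends:
  - name: openai
    schema:
      name: OpenAI      # per-backend output schema, renamed the same way
  headers:
  - name: x-envoy-ai-gateway-model
    value: gpt4.4444

The same rename applies to the CRDs: LLMRoute and LLMBackend resources now set spec.schema.name (still restricted to the OpenAI/AWSBedrock enum, with LLMRoute additionally validated by the rule self.name == 'OpenAI'), where they previously set spec.schema.schema, as the updated cel-validation testdata above shows.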