diff --git a/cmd/controller/main.go b/cmd/controller/main.go new file mode 100644 index 00000000..2d2aa216 --- /dev/null +++ b/cmd/controller/main.go @@ -0,0 +1,61 @@ +package main + +import ( + "os" + + "knative.dev/pkg/injection" + "knative.dev/pkg/injection/sharedmain" + "knative.dev/pkg/signals" + + "github.com/zeiss/typhoon/pkg/extensions/reconciler/function" + "github.com/zeiss/typhoon/pkg/flow/reconciler/jqtransformation" + "github.com/zeiss/typhoon/pkg/flow/reconciler/synchronizer" + "github.com/zeiss/typhoon/pkg/flow/reconciler/transformation" + "github.com/zeiss/typhoon/pkg/flow/reconciler/xmltojsontransformation" + "github.com/zeiss/typhoon/pkg/flow/reconciler/xslttransformation" + "github.com/zeiss/typhoon/pkg/routing/reconciler/filter" + "github.com/zeiss/typhoon/pkg/routing/reconciler/splitter" + "github.com/zeiss/typhoon/pkg/sources/reconciler/cloudeventssource" + "github.com/zeiss/typhoon/pkg/sources/reconciler/httppollersource" + "github.com/zeiss/typhoon/pkg/sources/reconciler/kafkasource" + "github.com/zeiss/typhoon/pkg/sources/reconciler/webhooksource" + "github.com/zeiss/typhoon/pkg/targets/reconciler/cloudeventstarget" + "github.com/zeiss/typhoon/pkg/targets/reconciler/httptarget" + "github.com/zeiss/typhoon/pkg/targets/reconciler/kafkatarget" + "github.com/zeiss/typhoon/pkg/targets/reconciler/logzmetricstarget" + "github.com/zeiss/typhoon/pkg/targets/reconciler/logztarget" + "github.com/zeiss/typhoon/pkg/targets/reconciler/splunktarget" +) + +func main() { + ctx := signals.NewContext() + + if namespace, set := os.LookupEnv("WORKING_NAMESPACE"); set { + ctx = injection.WithNamespaceScope(ctx, namespace) + } + + sharedmain.MainWithContext(ctx, "typhoon-controller", + cloudeventssource.NewController, + httppollersource.NewController, + kafkasource.NewController, + webhooksource.NewController, + + cloudeventstarget.NewController, + httptarget.NewController, + kafkatarget.NewController, + logztarget.NewController, + 
logzmetricstarget.NewController, + splunktarget.NewController, + // flow + jqtransformation.NewController, + synchronizer.NewController, + transformation.NewController, + xmltojsontransformation.NewController, + xslttransformation.NewController, + // extensions + function.NewController, + // routing + filter.NewController, + splitter.NewController, + ) +} diff --git a/go.mod b/go.mod index a06f1ede..17471563 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,9 @@ module github.com/zeiss/typhoon go 1.21.6 require ( + github.com/Shopify/sarama v1.30.0 github.com/ZachtimusPrime/Go-Splunk-HTTP/splunk/v2 v2.0.2 + github.com/basgys/goxml2json v1.1.0 github.com/cloudevents/sdk-go/v2 v2.13.0 github.com/fsnotify/fsnotify v1.7.0 github.com/getkin/kin-openapi v0.123.0 @@ -12,14 +14,18 @@ require ( github.com/golangci/golangci-lint v1.55.2 github.com/google/cel-go v0.17.7 github.com/google/uuid v1.6.0 + github.com/itchyny/gojq v0.12.14 github.com/katallaxie/pkg v0.5.12 github.com/kelseyhightower/envconfig v1.4.0 github.com/logzio/logzio-go v1.0.6 github.com/oapi-codegen/fiber-middleware v1.0.1 github.com/oapi-codegen/runtime v1.1.1 + github.com/sethvargo/go-limiter v0.7.2 github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 github.com/tidwall/gjson v1.17.1 + github.com/wamuir/go-xslt v0.1.5 + github.com/xdg-go/scram v1.0.2 go.opencensus.io v0.24.0 go.uber.org/zap v1.26.0 golang.org/x/oauth2 v0.17.0 @@ -64,7 +70,9 @@ require ( github.com/ashanbrown/makezero v1.1.1 // indirect github.com/beeker1121/goque v2.1.0+incompatible // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bitly/go-simplejson v0.5.1 // indirect github.com/bkielbasa/cyclop v1.2.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/blendle/zapdriver v1.3.1 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect github.com/bombsimon/wsl/v3 v3.4.0 // indirect @@ -83,6 +91,9 @@ require ( github.com/daixiang0/gci v0.11.2 // indirect github.com/davecgh/go-spew v1.1.1 // 
indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/eapache/go-resiliency v1.3.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect + github.com/eapache/queue v1.1.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect @@ -139,6 +150,7 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.6.7 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -146,9 +158,15 @@ require ( github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/yaml v0.2.0 // indirect + github.com/itchyny/timefmt-go v0.1.5 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/pgx/v5 v5.4.3 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jgautheron/goconst v1.6.0 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jinzhu/inflection v1.0.0 // indirect @@ -198,6 +216,7 @@ require ( github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.9 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v1.4.5 // indirect @@ -211,6 +230,7 @@ require ( 
github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rickb777/date v1.13.0 // indirect github.com/rickb777/plural v1.2.1 // indirect github.com/rivo/uniseg v0.4.4 // indirect @@ -254,6 +274,8 @@ require ( github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasthttp v1.51.0 // indirect github.com/valyala/tcplisten v1.0.0 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/stringprep v1.0.2 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect @@ -263,6 +285,7 @@ require ( go-simpler.org/sloglint v0.1.2 // indirect go.tmz.dev/musttag v0.7.2 // indirect go.uber.org/atomic v1.10.0 // indirect + go.uber.org/automaxprocs v1.5.3 // indirect go.uber.org/goleak v1.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.19.0 // indirect @@ -286,6 +309,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.4.6 // indirect + k8s.io/apiextensions-apiserver v0.28.5 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect diff --git a/go.sum b/go.sum index 1ca34536..a81a1dc3 100644 --- a/go.sum +++ b/go.sum @@ -100,8 +100,11 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.30.0 h1:TOZL6r37xJBDEMLx4yjB77jxbZYXPaDow08TSK6vIL0= github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fTKCulPVs= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae h1:ePgznFqEG1v3AjMklnK8H7BSc++FDSo7xfK9K7Af+0Y= github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= github.com/ZachtimusPrime/Go-Splunk-HTTP/splunk/v2 v2.0.2 h1:HbpRy8SqOR3LNDJzXfMcmmoiJjAYncWYkZhKEKwZ0tE= github.com/ZachtimusPrime/Go-Splunk-HTTP/splunk/v2 v2.0.2/go.mod h1:102UvZ4vog5oDtPyvcgOR/0hKKYPOvo4CFELI9bchvc= @@ -144,6 +147,8 @@ github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8ger github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/basgys/goxml2json v1.1.0 h1:4ln5i4rseYfXNd86lGEB+Vi652IsIXIvggKM/BhUKVw= +github.com/basgys/goxml2json v1.1.0/go.mod h1:wH7a5Np/Q4QoECFIU8zTQlZwZkrilY0itPfecMw41Dw= github.com/beeker1121/goque v2.1.0+incompatible h1:m5pZ5b8nqzojS2DF2ioZphFYQUqGYsDORq6uefUItPM= github.com/beeker1121/goque v2.1.0+incompatible/go.mod h1:L6dOWBhDOnxUVQsb0wkLve0VCnt2xJW/MI8pdRX4ANw= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= @@ -153,11 +158,14 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow= +github.com/bitly/go-simplejson v0.5.1/go.mod h1:YOPVLzCfwK14b4Sff3oP1AmGhI9T9Vsg84etUnlyp+Q= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE= github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= @@ -255,7 +263,12 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= +github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM= +github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod 
h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -293,6 +306,7 @@ github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phm github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= @@ -596,6 +610,8 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -628,17 +644,29 @@ github.com/influxdata/tdigest v0.0.0-20180711151920-a7d76c6f093a/go.mod h1:9Gkys github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= +github.com/itchyny/gojq v0.12.14 h1:6k8vVtsrhQSYgSGg827AD+PVVaB1NLXEdX+dda2oZCc= +github.com/itchyny/gojq v0.12.14/go.mod h1:y1G7oO7XkcR1LPZO59KyoCRy08T3j9vDYRV0GgYSS+s= +github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= +github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.4.3 h1:cxFyXhxlvAifxnkKKdlxv8XqUf59tDlYjnV5YYfsJJY= github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= 
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE/Tq8= +github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1ADWGA= @@ -857,6 +885,7 @@ github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0V github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -873,6 +902,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/pquerna/cachecontrol 
v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -924,6 +955,7 @@ github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4l github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rickb777/date v1.13.0 h1:+8AmwLuY1d/rldzdqvqTEg7107bZ8clW37x4nsdG3Hs= github.com/rickb777/date v1.13.0/go.mod h1:GZf3LoGnxPWjX+/1TXOuzHefZFDovTyNLHDMd3qH70k= @@ -956,6 +988,8 @@ github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSC github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/securego/gosec/v2 v2.18.2 h1:DkDt3wCiOtAHf1XkiXZBhQ6m6mK/b9T/wD257R3/c+I= github.com/securego/gosec/v2 v2.18.2/go.mod h1:xUuqSF6i0So56Y2wwohWAmB07EdBkUN6crbLlHwbyJs= +github.com/sethvargo/go-limiter v0.7.2 h1:FgC4N7RMpV5gMrUdda15FaFTkQ/L4fEqM7seXMs4oO8= +github.com/sethvargo/go-limiter v0.7.2/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= github.com/shazow/go-diff 
v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= @@ -1094,9 +1128,14 @@ github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1S github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g= github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/wamuir/go-xslt v0.1.5 h1:FmO1SD7PpoJtHOfnXcb6R/+NANYHX8+mz0UogNJuPnk= +github.com/wamuir/go-xslt v0.1.5/go.mod h1:4TQnJGYG4FeeVIgAnV4tyr5pyZQOpxfEZv6Uby/qikU= github.com/wavesoftware/go-ensure v1.0.0/go.mod h1:K2UAFSwMTvpiRGay/M3aEYYuurcR8S4A6HkQlJPV8k4= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= @@ -1163,6 +1202,8 @@ go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= +go.uber.org/automaxprocs v1.5.3 
h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -1195,6 +1236,7 @@ golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= @@ -1305,6 +1347,7 @@ golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= @@ -1795,6 +1838,8 @@ k8s.io/api 
v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g= +k8s.io/apiextensions-apiserver v0.28.5 h1:YKW9O9T/0Gkyl6LTFDLIhCbouSRh+pHt2vMLB38Snfc= +k8s.io/apiextensions-apiserver v0.28.5/go.mod h1:7p7TQ0X9zCJLNFlOTi5dncAi2dkPsdsrcvu5ILa7PEk= k8s.io/apimachinery v0.19.7/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= diff --git a/pkg/extensions/reconciler/function/adapter.go b/pkg/extensions/reconciler/function/adapter.go new file mode 100644 index 00000000..c6e7d87d --- /dev/null +++ b/pkg/extensions/reconciler/function/adapter.go @@ -0,0 +1,220 @@ +package function + +import ( + "os" + "path/filepath" + "sort" + "strings" + "sync" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/extensions/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + codeVersionAnnotation = "extensions.typhoon.zeiss.com/codeVersion" + codeCmapVolName = "code" +) + +const klrEntrypoint = "/opt/aws-custom-runtime" + +const ( + eventStoreEnv = "EVENTSTORE_URI" + runtimeEnvPrefix = "RUNTIME_" +) + +// adapterConfig contains properties used to configure the Function's adapter. +// Public fields are automatically populated by envconfig. 
+type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. +func (r *Reconciler) BuildAdapter(rcl commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + f := rcl.(*v1alpha1.Function) + + var cmapName string + var cmapRev string + if codeCmap := f.Status.ConfigMap; codeCmap != nil { + cmapName = codeCmap.Name + cmapRev = codeCmap.ResourceVersion + } + + srcCodePath := filepath.Join("/opt", "source."+fileExtension(f.Spec.Runtime)) + srcCodeVol, srcCodeVolMount := sourceCodeVolumeAndMount(srcCodePath, cmapName) + + return common.NewAdapterKnService(rcl, sinkURI, + resource.Image(lookupRuntimeImage(f.Spec.Runtime)), + + resource.Annotation(codeVersionAnnotation, cmapRev), + resource.Label(functionNameLabel, f.Name), + + resource.EnvVars(MakeAppEnv(f)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + resource.EntrypointCommand(klrEntrypoint), + + resource.Volumes(srcCodeVol), + resource.VolumeMounts(srcCodeVolMount), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(f *v1alpha1.Function) []corev1.EnvVar { + var responseMode string + if f.Spec.ResponseIsEvent { + responseMode = "event" + } + + ceOverrides := map[string]string{ + // Default values for required attributes + "type": f.GetEventTypes()[0], + "source": f.AsEventSource(), + } + + if f.Spec.CloudEventOverrides != nil { + for k, v := range f.Spec.CloudEventOverrides.Extensions { + if k != "type" && k != "source" { + ceOverrides[k] = v + } + } + } + + return append([]corev1.EnvVar{ + { + Name: eventStoreEnv, + Value: f.Spec.EventStore.URI, + }, + { + Name: "_HANDLER", + Value: "source." 
+ f.Spec.Entrypoint, + }, + { + Name: "RESPONSE_FORMAT", + Value: "CLOUDEVENTS", + }, + { + Name: "CE_FUNCTION_RESPONSE_MODE", + Value: responseMode, + }, + { + Name: "INTERNAL_API_PORT", + Value: "8088", + }, + }, sortedEnvVarsWithPrefix("CE_OVERRIDES_", ceOverrides)...) +} + +// Lambda runtimes require file extensions to match the language, +// i.e. source file for Python runtime must have ".py" prefix, JavaScript - ".js", etc. +// It would be more correct to declare these extensions explicitly, +// along with the runtime container URIs, but since we manage the +// available runtimes list, this also works. +func fileExtension(runtime string) string { + runtime = strings.ToLower(runtime) + switch { + case strings.Contains(runtime, "python"): + return "py" + case strings.Contains(runtime, "node") || + strings.Contains(runtime, "js"): + return "js" + case strings.Contains(runtime, "ruby"): + return "rb" + case strings.Contains(runtime, "sh"): + return "sh" + } + return "txt" +} + +// Env variables from extensions override map are sorted alphabetically before +// passing to container env to prevent reconciliation loop when map keys are randomized. 
+func sortedEnvVarsWithPrefix(prefix string, overrides map[string]string) []corev1.EnvVar { + keys := make([]string, 0, len(overrides)) + for key := range overrides { + keys = append(keys, key) + } + sort.Strings(keys) + + res := make([]corev1.EnvVar, 0, len(keys)) + for _, key := range keys { + res = append(res, corev1.EnvVar{ + Name: strings.ToUpper(prefix + key), + Value: overrides[key], + }) + } + + return res +} + +var ( + // guards initialization by initRuntimes, which populates runtimes + runtimesOnce sync.Once + // runtime names and associated container images + runtimes map[string]string +) + +func initRuntimes() { + runtimes = make(map[string]string) + for _, e := range os.Environ() { + if !strings.HasPrefix(e, runtimeEnvPrefix) { + continue + } + e = strings.TrimPrefix(e, runtimeEnvPrefix) + runtimePairs := strings.SplitN(e, "=", 2) + runtimes[runtimePairs[0]] = runtimePairs[1] + } +} + +func lookupRuntimeImage(runtime string) string { + rn := strings.ToLower(runtime) + + runtimesOnce.Do(initRuntimes) + + for name, img := range runtimes { + name = strings.ToLower(name) + if strings.Contains(name, rn) { + return img + } + } + + return "" +} + +// sourceCodeVolumeAndMount returns a ConfigMap-based volume and corresponding +// mount for the Function's source code. 
+func sourceCodeVolumeAndMount(mountPath, cmName string) (corev1.Volume, corev1.VolumeMount) { + v := corev1.Volume{ + Name: codeCmapVolName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cmName, + }, + Items: []corev1.KeyToPath{{ + Key: codeCmapDataKey, + Path: filepath.Base(mountPath), + }}, + }, + }, + } + + vm := corev1.VolumeMount{ + Name: codeCmapVolName, + ReadOnly: true, + MountPath: mountPath, + SubPath: filepath.Base(mountPath), + } + + return v, vm +} diff --git a/pkg/extensions/reconciler/function/cmap.go b/pkg/extensions/reconciler/function/cmap.go new file mode 100644 index 00000000..bd4efd0f --- /dev/null +++ b/pkg/extensions/reconciler/function/cmap.go @@ -0,0 +1,177 @@ +package function + +import ( + "context" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + + "knative.dev/pkg/kmeta" + "knative.dev/pkg/reconciler" + "knative.dev/pkg/tracker" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/extensions/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/event" + "github.com/zeiss/typhoon/pkg/reconciler/resource" + "github.com/zeiss/typhoon/pkg/reconciler/skip" +) + +const functionNameLabel = "extensions.typhoon.zeiss.com/function" + +const codeCmapDataKey = "code" + +// appInstanceLabel is a unique name identifying the instance of an application. 
+// See Kubernetes recommended labels +// https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +const appInstanceLabel = "app.kubernetes.io/instance" + +var configMapGVK = corev1.SchemeGroupVersion.WithKind("ConfigMap") + +func (r *Reconciler) reconcileConfigmap(ctx context.Context) error { + if skip.Skip(ctx) { + return nil + } + + f := commonv1alpha1.ReconcilableFromContext(ctx).(*v1alpha1.Function) + status := &f.Status + + desiredCmap := newCodeConfigMap(f) + + currentCmap, err := r.getOrCreateCodeConfigMap(ctx, desiredCmap) + if err != nil { + status.MarkConfigMapUnavailable(v1alpha1.FunctionReasonFailedSync, fmt.Sprintf( + "Failed to get or create code ConfigMap: %s", err)) + return err + } + + if currentCmap, err = r.syncCodeConfigMap(ctx, currentCmap, desiredCmap); err != nil { + status.MarkConfigMapUnavailable(v1alpha1.FunctionReasonFailedSync, fmt.Sprintf( + "Failed to synchronize code ConfigMap: %s", err)) + return fmt.Errorf("synchronizing code ConfigMap: %w", err) + } + + cmRef := tracker.Reference{ + APIVersion: configMapGVK.GroupVersion().String(), + Kind: configMapGVK.Kind, + Name: currentCmap.Name, + Namespace: currentCmap.Namespace, + } + + if err := r.tracker.TrackReference(cmRef, f); err != nil { + return fmt.Errorf("tracking changes to code ConfigMap: %w", err) + } + + status.MarkConfigMapAvailable(currentCmap.Name, currentCmap.ResourceVersion) + + return nil +} + +// getOrCreateCodeConfigMap returns the existing code ConfigMap for a given +// Function instance, or creates it if it is missing. 
+func (r *Reconciler) getOrCreateCodeConfigMap(ctx context.Context, + desiredCmap *corev1.ConfigMap, +) (*corev1.ConfigMap, error) { + rcl := commonv1alpha1.ReconcilableFromContext(ctx) + + cmap, err := r.findCodeConfigMap(rcl, metav1.GetControllerOfNoCopy(desiredCmap)) + switch { + case apierrors.IsNotFound(err): + cmap, err = r.cmCli(desiredCmap.Namespace).Create(ctx, desiredCmap, metav1.CreateOptions{}) + if err != nil { + return nil, reconciler.NewEvent(corev1.EventTypeWarning, "FailedConfigMapCreate", + "Failed to create code ConfigMap %q: %s", desiredCmap.Name, err) + } + event.Normal(ctx, "CreateConfigMap", "Created code ConfigMap %q", cmap.Name) + + case err != nil: + return nil, fmt.Errorf("getting code ConfigMap from cache: %w", err) + } + + return cmap, nil +} + +// syncCodeConfigMap synchronizes the desired state of a Function's code +// ConfigMap against its current state in the running cluster. +func (r *Reconciler) syncCodeConfigMap(ctx context.Context, + currentCmap, desiredCmap *corev1.ConfigMap, +) (*corev1.ConfigMap, error) { + if reflect.DeepEqual(desiredCmap.Data, currentCmap.Data) { + return currentCmap, nil + } + + // resourceVersion must be returned to the API server unmodified for + // optimistic concurrency, as per Kubernetes API conventions + desiredCmap.ResourceVersion = currentCmap.ResourceVersion + + cmap, err := r.cmCli(desiredCmap.Namespace).Update(ctx, desiredCmap, metav1.UpdateOptions{}) + if err != nil { + return nil, reconciler.NewEvent(corev1.EventTypeWarning, "FailedConfigMapUpdate", + "Failed to update code ConfigMap %q: %s", desiredCmap.Name, err) + } + event.Normal(ctx, "UpdateConfigMap", "Updated code ConfigMap %q", cmap.Name) + + return cmap, nil +} + +// findCodeConfigMap returns the ConfigMap containing the code of the given +// Function instance if it exists. 
+func (r *Reconciler) findCodeConfigMap(rcl commonv1alpha1.Reconcilable,
+	owner *metav1.OwnerReference,
+) (*corev1.ConfigMap, error) {
+	ls := common.CommonObjectLabels(rcl)
+
+	// the combination of standard labels {name,instance} is unique
+	// and immutable for single-tenant components
+	ls[appInstanceLabel] = rcl.GetName()
+
+	sel := labels.SelectorFromValidatedSet(ls)
+
+	cmaps, err := r.cmLister(rcl.GetNamespace()).List(sel)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, cmap := range cmaps {
+		cmapOwner := metav1.GetControllerOfNoCopy(cmap)
+
+		if cmapOwner != nil && cmapOwner.UID == owner.UID { // GetControllerOfNoCopy returns nil when no controller ref exists
+			return cmap, nil
+		}
+	}
+
+	gr := corev1.Resource("configmaps")
+
+	return nil, newNotFoundForSelector(gr, sel)
+}
+
+// newNotFoundForSelector returns an error which indicates that no object of
+// the type matching the given GroupResource was found for the given label
+// selector.
+func newNotFoundForSelector(gr schema.GroupResource, sel labels.Selector) *apierrors.StatusError {
+	err := apierrors.NewNotFound(gr, "")
+	err.ErrStatus.Message = fmt.Sprint(gr, " not found for selector ", sel)
+	return err
+}
+
+// newCodeConfigMap returns a ConfigMap object containing the code of the given Function.
+func newCodeConfigMap(f *v1alpha1.Function) *corev1.ConfigMap { + ns := f.Namespace + name := f.Name + + return resource.NewConfigMap(ns, kmeta.ChildName(common.ComponentName(f)+"-code-", name), + resource.Controller(f), + + resource.Labels(common.CommonObjectLabels(f)), + resource.Label(appInstanceLabel, name), + resource.Label(functionNameLabel, name), + + resource.Data(codeCmapDataKey, f.Spec.Code), + ) +} diff --git a/pkg/extensions/reconciler/function/controller.go b/pkg/extensions/reconciler/function/controller.go new file mode 100644 index 00000000..23db5646 --- /dev/null +++ b/pkg/extensions/reconciler/function/controller.go @@ -0,0 +1,58 @@ +package function + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + k8sclient "knative.dev/pkg/client/injection/kube/client" + cminformerv1 "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/extensions/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/extensions/v1alpha1/function" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/extensions/v1alpha1/function" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.Function)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. 
+ adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + cmLister: cminformerv1.Get(ctx).Lister().ConfigMaps, + cmCli: k8sclient.Get(ctx).CoreV1().ConfigMaps, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.Function]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().Functions, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + r.tracker = impl.Tracker + + return impl +} diff --git a/pkg/extensions/reconciler/function/reconciler.go b/pkg/extensions/reconciler/function/reconciler.go new file mode 100644 index 00000000..eb536224 --- /dev/null +++ b/pkg/extensions/reconciler/function/reconciler.go @@ -0,0 +1,45 @@ +package function + +import ( + "context" + "fmt" + + typedv1 "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + + "knative.dev/pkg/reconciler" + "knative.dev/pkg/tracker" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/extensions/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/extensions/v1alpha1/function" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/extensions/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. 
+type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.Function, listersv1alpha1.FunctionNamespaceLister] + adapterCfg *adapterConfig + + cmLister func(namespace string) corev1listers.ConfigMapNamespaceLister + cmCli func(namespace string) typedv1.ConfigMapInterface + + // tracker allows reacting to changes in code ConfigMaps + tracker tracker.Interface +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. +func (r *Reconciler) ReconcileKind(ctx context.Context, f *v1alpha1.Function) reconciler.Event { + // inject Function into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, f) + + if err := r.reconcileConfigmap(ctx); err != nil { + return fmt.Errorf("failed to reconcile code ConfigMap: %w", err) + } + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/flow/adapter/jqtransformation/adapter.go b/pkg/flow/adapter/jqtransformation/adapter.go new file mode 100644 index 00000000..92957201 --- /dev/null +++ b/pkg/flow/adapter/jqtransformation/adapter.go @@ -0,0 +1,118 @@ +package jqtransformation + +import ( + "context" + "encoding/json" + + "github.com/itchyny/gojq" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "go.uber.org/zap" + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/flow" + "github.com/zeiss/typhoon/pkg/metrics" + targetce "github.com/zeiss/typhoon/pkg/targets/adapter/cloudevents" +) + +// NewAdapter adapter implementation +func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: flow.JQTransformationResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + metrics.MustRegisterEventProcessingStatsView() + + env := 
envAcc.(*envAccessor) + + replier, err := targetce.New(env.Component, logger.Named("replier"), + targetce.ReplierWithStatefulHeaders(env.BridgeIdentifier), + targetce.ReplierWithStaticResponseType("com.zeiss.jqtransformation.error"), + targetce.ReplierWithPayloadPolicy(targetce.PayloadPolicy(env.CloudEventPayloadPolicy))) + if err != nil { + logger.Panicf("Error creating CloudEvents replier: %v", err) + } + + query, err := gojq.Parse(env.Query) + if err != nil { + logger.Panicf("Error creating query: %v", err) + } + + return &jqadapter{ + query: query, + + sink: env.Sink, + replier: replier, + ceClient: ceClient, + logger: logger, + + mt: mt, + sr: metrics.MustNewEventProcessingStatsReporter(mt), + } +} + +var _ pkgadapter.Adapter = (*jqadapter)(nil) + +type jqadapter struct { + query *gojq.Query + + sink string + replier *targetce.Replier + ceClient cloudevents.Client + logger *zap.SugaredLogger + + mt *pkgadapter.MetricTag + sr *metrics.EventProcessingStatsReporter +} + +// Start is a blocking function and will return if an error occurs +// or the context is cancelled. 
+func (a *jqadapter) Start(ctx context.Context) error { + a.logger.Info("Starting JQTransformation Adapter") + ctx = pkgadapter.ContextWithMetricTag(ctx, a.mt) + return a.ceClient.StartReceiver(ctx, a.dispatch) +} + +func (a *jqadapter) dispatch(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + var data interface{} + var qd interface{} + if err := event.DataAs(&data); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + + iter := a.query.Run(data) + for { + v, ok := iter.Next() + if !ok { + break + } + if err, ok := v.(error); ok { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + qd = v + } + + // Reserialize the query results for the response + bs, err := json.Marshal(&qd) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + + if err := event.SetData(cloudevents.ApplicationJSON, bs); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + if a.sink != "" { + if result := a.ceClient.Send(ctx, event); !cloudevents.IsACK(result) { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, result, "sending the cloudevent to the sink") + } + return nil, cloudevents.ResultACK + } + + return &event, cloudevents.ResultACK +} diff --git a/pkg/flow/adapter/jqtransformation/config.go b/pkg/flow/adapter/jqtransformation/config.go new file mode 100644 index 00000000..3960a24a --- /dev/null +++ b/pkg/flow/adapter/jqtransformation/config.go @@ -0,0 +1,21 @@ +package jqtransformation + +import pkgadapter "knative.dev/eventing/pkg/adapter/v2" + +// EnvAccessorCtor for configuration parameters +func EnvAccessorCtor() pkgadapter.EnvConfigAccessor { + return &envAccessor{} +} + +type envAccessor struct { + pkgadapter.EnvConfig + // Query represents the jq query to be applied to the incoming event + Query string `envconfig:"JQ_QUERY" required:"true"` + // BridgeIdentifier 
is the name of the bridge workflow this target is part of + BridgeIdentifier string `envconfig:"EVENTS_BRIDGE_IDENTIFIER"` + // CloudEvents responses parametrization + CloudEventPayloadPolicy string `envconfig:"EVENTS_PAYLOAD_POLICY" default:"error"` + // Sink defines the target sink for the events. If no Sink is defined the + // events are replied back to the sender. + Sink string `envconfig:"K_SINK"` +} diff --git a/pkg/flow/adapter/synchronizer/adapter.go b/pkg/flow/adapter/synchronizer/adapter.go new file mode 100644 index 00000000..1f7916a3 --- /dev/null +++ b/pkg/flow/adapter/synchronizer/adapter.go @@ -0,0 +1,160 @@ +package synchronizer + +import ( + "context" + "fmt" + "net/http" + "time" + + "go.uber.org/zap" + + cloudevents "github.com/cloudevents/sdk-go/v2" + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/flow" + "github.com/zeiss/typhoon/pkg/metrics" + targetce "github.com/zeiss/typhoon/pkg/targets/adapter/cloudevents" +) + +var _ pkgadapter.Adapter = (*adapter)(nil) + +type adapter struct { + ceClient cloudevents.Client + logger *zap.SugaredLogger + + mt *pkgadapter.MetricTag + sr *metrics.EventProcessingStatsReporter + + correlationKey *correlationKey + responseTimeout time.Duration + + sessions *storage + sinkURL string + bridgeID string +} + +// NewAdapter returns adapter implementation. 
+func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter {
+	logger := logging.FromContext(ctx)
+
+	mt := &pkgadapter.MetricTag{
+		ResourceGroup: flow.SynchronizerResource.String(),
+		Namespace:     envAcc.GetNamespace(),
+		Name:          envAcc.GetName(),
+	}
+
+	metrics.MustRegisterEventProcessingStatsView()
+
+	env := envAcc.(*envAccessor)
+
+	key, err := newCorrelationKey(env.CorrelationKey, env.CorrelationKeyLength)
+	if err != nil {
+		logger.Panicf("Cannot create an instance of Correlation Key: %v", err)
+	}
+
+	return &adapter{
+		ceClient: ceClient,
+		logger:   logger,
+
+		mt: mt,
+		sr: metrics.MustNewEventProcessingStatsReporter(mt),
+
+		correlationKey:  key,
+		responseTimeout: env.ResponseWaitTimeout,
+
+		sessions: newStorage(),
+		sinkURL:  env.Sink,
+		bridgeID: env.BridgeIdentifier,
+	}
+}
+
+// Returns if stopCh is closed or Send() returns an error.
+func (a *adapter) Start(ctx context.Context) error {
+	a.logger.Info("Starting Synchronizer Adapter")
+	ctx = pkgadapter.ContextWithMetricTag(ctx, a.mt)
+	return a.ceClient.StartReceiver(ctx, a.dispatch)
+}
+
+func (a *adapter) dispatch(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) {
+	a.logger.Debugf("Received the event: %s", event.String())
+
+	if correlationID, exists := a.correlationKey.get(event); exists {
+		return a.serveResponse(ctx, correlationID, event)
+	}
+
+	correlationID := a.correlationKey.set(&event)
+	return a.serveRequest(ctx, correlationID, event)
+}
+
+// serveRequest creates the session for the incoming events and blocks the client.
+func (a *adapter) serveRequest(ctx context.Context, correlationID string, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) {
+	a.logger.Debugf("Handling request %q", correlationID)
+
+	respChan, err := a.sessions.add(correlationID)
+	if err != nil {
+		return nil, cloudevents.NewHTTPResult(http.StatusInternalServerError, "cannot add session %q: %v", correlationID, err)
+	}
+	defer a.sessions.delete(correlationID)
+
+	sendErr := make(chan error, 1) // buffered so the sender goroutine never blocks
+	// note: sendErr is intentionally never closed; after a timeout this function
+	// returns while the goroutine may still send, and a close here would panic it.
+
+	go func() {
+		if res := a.ceClient.Send(cloudevents.ContextWithTarget(ctx, a.sinkURL), a.withBridgeIdentifier(&event)); cloudevents.IsUndelivered(res) {
+			sendErr <- res
+		}
+	}()
+
+	a.logger.Debugf("Waiting response for %q", correlationID)
+
+	select {
+	case err := <-sendErr:
+		a.logger.Errorw("Unable to forward the request", zap.Error(err))
+		return nil, cloudevents.NewHTTPResult(http.StatusBadRequest, "unable to forward the request: %v", err)
+	case result := <-respChan:
+		if result == nil {
+			a.logger.Errorw("No response", zap.Error(fmt.Errorf("response channel with ID %q is closed", correlationID)))
+			return nil, cloudevents.NewHTTPResult(http.StatusInternalServerError, "failed to communicate the response")
+		}
+		a.logger.Debugf("Received response for %q", correlationID)
+		res := a.withBridgeIdentifier(result)
+		return &res, cloudevents.ResultACK
+	case <-time.After(a.responseTimeout):
+		a.logger.Errorw("Request time out", zap.Error(fmt.Errorf("request %q did not receive backend response in time", correlationID)))
+		return nil, cloudevents.NewHTTPResult(http.StatusGatewayTimeout, "backend did not respond in time")
+	}
+}
+
+// serveResponse matches event's correlation key and writes response back to the session's communication channel.
+func (a *adapter) serveResponse(ctx context.Context, correlationID string, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + a.logger.Debugf("Handling response %q", correlationID) + + responseChan, exists := a.sessions.get(correlationID) + if !exists { + a.logger.Errorw("Session not found", zap.Error(fmt.Errorf("client session with ID %q does not exist", correlationID))) + return nil, cloudevents.NewHTTPResult(http.StatusBadGateway, "client session does not exist") + } + + a.logger.Debugf("Forwarding response %q", correlationID) + select { + case responseChan <- &event: + a.logger.Debugf("Response %q completed", correlationID) + return nil, cloudevents.ResultACK + default: + a.logger.Errorw("Unable to forward the response", zap.Error(fmt.Errorf("client connection with ID %q is closed", correlationID))) + return nil, cloudevents.NewHTTPResult(http.StatusBadGateway, "client connection is closed") + } +} + +// withBridgeIdentifier adds Bridge ID to the event context. +func (a *adapter) withBridgeIdentifier(event *cloudevents.Event) cloudevents.Event { + if a.bridgeID == "" { + return *event + } + if bid, err := event.Context.GetExtension(targetce.StatefulWorkflowHeader); err != nil && bid != "" { + return *event + } + event.SetExtension(targetce.StatefulWorkflowHeader, a.bridgeID) + return *event +} diff --git a/pkg/flow/adapter/synchronizer/correlation.go b/pkg/flow/adapter/synchronizer/correlation.go new file mode 100644 index 00000000..74e635fc --- /dev/null +++ b/pkg/flow/adapter/synchronizer/correlation.go @@ -0,0 +1,75 @@ +package synchronizer + +import ( + "fmt" + "math/rand" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2/event" +) + +// Correlation Key charset. +const correlationKeycharset = "abcdefghijklmnopqrstuvwxyz0123456789" + +var ( + // CloudEvent attributes cannot be used as a correltaion key. 
+ restrictedKeys = []string{ + "id", + "type", + "time", + "subject", + "schemaurl", + "dataschema", + "specversion", + "datamediatype", + "datacontenttype", + "datacontentencoding", + } + + seededRand *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano())) +) + +// correlationKey is the correlation attribute for the CloudEvents. +type correlationKey struct { + attribute string + length int +} + +// NewCorrelationKey returns an instance of the CloudEvent Correlation key. +func newCorrelationKey(attribute string, length int) (*correlationKey, error) { + for _, rk := range restrictedKeys { + if attribute == rk { + return nil, fmt.Errorf("%q cannot be used as a correlation key", attribute) + } + } + + return &correlationKey{ + attribute: attribute, + length: length, + }, nil +} + +// Get returns the value of Correlation Key. +func (k *correlationKey) get(event cloudevents.Event) (string, bool) { + if val, exists := event.Extensions()[k.attribute]; exists { + return val.(string), true + } + return "", false +} + +// Set updates the CloudEvent's context with the random Correlation Key value. +func (k *correlationKey) set(event *cloudevents.Event) string { + correlationID := randString(k.length) + event.SetExtension(k.attribute, correlationID) + return correlationID +} + +// randString generates the random string with fixed length. 
+func randString(length int) string {
+	k := make([]byte, length)
+	l := len(correlationKeycharset) // Intn is exclusive; len-1 would make the last charset byte unreachable
+	for i := range k {
+		k[i] = correlationKeycharset[seededRand.Intn(l)]
+	}
+	return string(k)
+}
diff --git a/pkg/flow/adapter/synchronizer/env.go b/pkg/flow/adapter/synchronizer/env.go
new file mode 100644
index 00000000..94f91820
--- /dev/null
+++ b/pkg/flow/adapter/synchronizer/env.go
@@ -0,0 +1,23 @@
+package synchronizer
+
+import (
+	"time"
+
+	pkgadapter "knative.dev/eventing/pkg/adapter/v2"
+)
+
+// EnvAccessorCtor for configuration parameters
+func EnvAccessorCtor() pkgadapter.EnvConfigAccessor {
+	return &envAccessor{}
+}
+
+type envAccessor struct {
+	pkgadapter.EnvConfig
+
+	CorrelationKey       string        `envconfig:"CORRELATION_KEY"`
+	CorrelationKeyLength int           `envconfig:"CORRELATION_KEY_LENGTH"`
+	ResponseWaitTimeout  time.Duration `envconfig:"RESPONSE_WAIT_TIMEOUT"`
+
+	// BridgeIdentifier is the name of the bridge workflow this target is part of
+	BridgeIdentifier string `envconfig:"EVENTS_BRIDGE_IDENTIFIER"`
+}
diff --git a/pkg/flow/adapter/synchronizer/storage.go b/pkg/flow/adapter/synchronizer/storage.go
new file mode 100644
index 00000000..151fee7e
--- /dev/null
+++ b/pkg/flow/adapter/synchronizer/storage.go
@@ -0,0 +1,53 @@
+package synchronizer
+
+import (
+	"fmt"
+	"sync"
+
+	cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+// storage holds the map of open connections and corresponding channels.
+type storage struct {
+	sync.Mutex
+	sessions map[string]chan *cloudevents.Event
+}
+
+// newStorage returns an instance of the sessions storage.
+func newStorage() *storage {
+	return &storage{
+		sessions: make(map[string]chan *cloudevents.Event),
+	}
+}
+
+// add creates the new communication channel and adds it to the session storage.
+func (s *storage) add(id string) (<-chan *cloudevents.Event, error) { + s.Lock() + defer s.Unlock() + + if _, exists := s.sessions[id]; exists { + return nil, fmt.Errorf("session already exists") + } + + c := make(chan *cloudevents.Event) + s.sessions[id] = c + return c, nil +} + +// delete closes the communication channel and removes it from the storage. +func (s *storage) delete(id string) { + s.Lock() + defer s.Unlock() + + close(s.sessions[id]) + delete(s.sessions, id) +} + +// open returns the communication channel for the session id. +func (s *storage) get(id string) (chan<- *cloudevents.Event, bool) { + s.Lock() + defer s.Unlock() + + session, exists := s.sessions[id] + return session, exists +} diff --git a/pkg/flow/adapter/transformation/adapter.go b/pkg/flow/adapter/transformation/adapter.go new file mode 100644 index 00000000..c6997f40 --- /dev/null +++ b/pkg/flow/adapter/transformation/adapter.go @@ -0,0 +1,240 @@ +package transformation + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "go.uber.org/zap" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/flow" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage" + "github.com/zeiss/typhoon/pkg/metrics" +) + +type envConfig struct { + pkgadapter.EnvConfig + + // Sink URL where to send cloudevents + Sink string `envconfig:"K_SINK"` + + // Transformation specifications + TransformationContext string `envconfig:"TRANSFORMATION_CONTEXT"` + TransformationData string `envconfig:"TRANSFORMATION_DATA"` +} + +// adapter contains Pipelines for CE transformations and CloudEvents client. 
+type adapter struct { + ContextPipeline *Pipeline + DataPipeline *Pipeline + + mt *pkgadapter.MetricTag + sr *metrics.EventProcessingStatsReporter + + sink string + + client cloudevents.Client + logger *zap.SugaredLogger +} + +// ceContext represents CloudEvents context structure but with exported Extensions. +type ceContext struct { + *cloudevents.EventContextV1 `json:",inline"` + Extensions map[string]interface{} `json:"Extensions,omitempty"` +} + +// NewEnvConfig satisfies pkgadapter.EnvConfigConstructor. +func NewEnvConfig() pkgadapter.EnvConfigAccessor { + return &envConfig{} +} + +func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: flow.TransformationResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + metrics.MustRegisterEventProcessingStatsView() + + env := envAcc.(*envConfig) + + trnContext, trnData := []v1alpha1.Transform{}, []v1alpha1.Transform{} + err := json.Unmarshal([]byte(env.TransformationContext), &trnContext) + if err != nil { + logger.Fatalf("Cannot unmarshal context transformation env variable: %v", err) + } + err = json.Unmarshal([]byte(env.TransformationData), &trnData) + if err != nil { + logger.Fatalf("Cannot unmarshal data transformation env variable: %v", err) + } + + sharedStorage := storage.New() + + contextPl, err := newPipeline(trnContext, sharedStorage) + if err != nil { + logger.Fatalf("Cannot create context transformation pipeline: %v", err) + } + + dataPl, err := newPipeline(trnData, sharedStorage) + if err != nil { + logger.Fatalf("Cannot create data transformation pipeline: %v", err) + } + + return &adapter{ + ContextPipeline: contextPl, + DataPipeline: dataPl, + + mt: mt, + sr: metrics.MustNewEventProcessingStatsReporter(mt), + + sink: env.Sink, + client: ceClient, + logger: logger, + } +} + +// Start runs CloudEvent receiver and 
applies transformation Pipeline +// on incoming events. +func (t *adapter) Start(ctx context.Context) error { + t.logger.Info("Starting Transformation adapter") + + var receiver interface{} + receiver = t.receiveAndReply + if t.sink != "" { + ctx = cloudevents.ContextWithTarget(ctx, t.sink) + receiver = t.receiveAndSend + } + + ctx = pkgadapter.ContextWithMetricTag(ctx, t.mt) + + return t.client.StartReceiver(ctx, receiver) +} + +func (t *adapter) receiveAndReply(event cloudevents.Event) (*cloudevents.Event, error) { + ceTypeTag := metrics.TagEventType(event.Type()) + ceSrcTag := metrics.TagEventSource(event.Source()) + + start := time.Now() + defer func() { + t.sr.ReportProcessingLatency(time.Since(start), ceTypeTag, ceSrcTag) + }() + + result, err := t.applyTransformations(event) + if err != nil { + t.sr.ReportProcessingError(false, ceTypeTag, ceSrcTag) + } else { + t.sr.ReportProcessingSuccess(ceTypeTag, ceSrcTag) + } + + return result, err +} + +func (t *adapter) receiveAndSend(ctx context.Context, event cloudevents.Event) error { + ceTypeTag := metrics.TagEventType(event.Type()) + ceSrcTag := metrics.TagEventSource(event.Source()) + + start := time.Now() + defer func() { + t.sr.ReportProcessingLatency(time.Since(start), ceTypeTag, ceSrcTag) + }() + + result, err := t.applyTransformations(event) + if err != nil { + t.sr.ReportProcessingError(false, ceTypeTag, ceSrcTag) + return err + } + + if result := t.client.Send(ctx, *result); !cloudevents.IsACK(result) { + t.sr.ReportProcessingError(false, ceTypeTag, ceSrcTag) + return result + } + + t.sr.ReportProcessingSuccess(ceTypeTag, ceSrcTag) + return nil +} + +func (t *adapter) applyTransformations(event cloudevents.Event) (*cloudevents.Event, error) { + // HTTPTargets sets content type from HTTP headers, i.e.: + // "datacontenttype: application/json; charset=utf-8" + // so we must use "contains" instead of strict equality + if !strings.Contains(event.DataContentType(), cloudevents.ApplicationJSON) { + err := 
fmt.Errorf("CE Content-Type %q is not supported", event.DataContentType()) + t.logger.Errorw("Bad Content-Type", zap.Error(err)) + return nil, err + } + + localContext := ceContext{ + EventContextV1: event.Context.AsV1(), + Extensions: event.Context.AsV1().GetExtensions(), + } + + localContextBytes, err := json.Marshal(localContext) + if err != nil { + t.logger.Errorw("Cannot encode CE context", zap.Error(err)) + return nil, fmt.Errorf("cannot encode CE context: %w", err) + } + + // init indicates if we need to run initial step transformation + init := true + var errs []error + + eventUniqueID := fmt.Sprintf("%s-%s", event.ID(), event.Source()) + + // remove event-related variables after the transformation is done. + // since the storage is shared, flush can be done for one pipeline. + defer t.ContextPipeline.Storage.Flush(eventUniqueID) + + // Run init step such as load Pipeline variables first + eventContext, err := t.ContextPipeline.apply(eventUniqueID, localContextBytes, init) + if err != nil { + errs = append(errs, err) + } + eventPayload, err := t.DataPipeline.apply(eventUniqueID, event.Data(), init) + if err != nil { + errs = append(errs, err) + } + + // CE Context transformation + if eventContext, err = t.ContextPipeline.apply(eventUniqueID, eventContext, !init); err != nil { + errs = append(errs, err) + } + + newContext := ceContext{} + if err := json.Unmarshal(eventContext, &newContext); err != nil { + t.logger.Errorw("Cannot decode CE new context", zap.Error(err)) + return nil, fmt.Errorf("cannot decode CE new context: %w", err) + } + event.Context = newContext + for k, v := range newContext.Extensions { + if err := event.Context.SetExtension(k, v); err != nil { + t.logger.Errorw("Cannot set CE extension", zap.Error(err)) + return nil, fmt.Errorf("cannot set CE extension: %w", err) + } + } + + // CE Data transformation + if eventPayload, err = t.DataPipeline.apply(eventUniqueID, eventPayload, !init); err != nil { + errs = append(errs, err) + } + if err = 
event.SetData(cloudevents.ApplicationJSON, eventPayload); err != nil { + t.logger.Errorw("Cannot set CE data", zap.Error(err)) + return nil, fmt.Errorf("cannot set CE data: %w", err) + } + // Failed transformation operations should not stop event flow + // therefore, just log the errors + if len(errs) != 0 { + t.logger.Errorw("Event transformation errors", zap.Errors("errors", errs)) + } + + return &event, nil +} diff --git a/pkg/flow/adapter/transformation/common/convert/convert.go b/pkg/flow/adapter/transformation/common/convert/convert.go new file mode 100644 index 00000000..7f757267 --- /dev/null +++ b/pkg/flow/adapter/transformation/common/convert/convert.go @@ -0,0 +1,104 @@ +package convert + +import ( + "strconv" + "strings" +) + +// SliceToMap converts string slice into map that can be encoded into JSON. +func SliceToMap(path []string, value interface{}) map[string]interface{} { + var array bool + var index int + i := strings.Index(path[0], "[") + if i > -1 && len(path[0]) > i+1 { + indexStr := path[0][i+1 : len(path[0])-1] + indexInt, err := strconv.Atoi(indexStr) + if err == nil { + index = indexInt + array = true + path[0] = path[0][:i] + } + } + + if len(path) == 1 { + if !array { + return map[string]interface{}{ + path[0]: value, + } + } + arr := make([]interface{}, index+1) + arr[index] = value + return map[string]interface{}{ + path[0]: arr, + } + } + + key := path[0] + path = path[1:] + m := SliceToMap(path, value) + if !array { + return map[string]interface{}{ + key: m, + } + } + arr := make([]interface{}, index+1) + arr[index] = m + return map[string]interface{}{ + key: arr, + } +} + +// MergeJSONWithMap accepts interface (effectively, JSON) and a map and merges them together. +// Source map keys are being overwritten by appendix keys if they overlap. 
+func MergeJSONWithMap(source, appendix interface{}) interface{} { + switch appendixValue := appendix.(type) { + case nil: + return source + case float64, bool, string: + return appendixValue + case []interface{}: + sourceInterface, ok := source.([]interface{}) + if !ok { + return appendixValue + } + resArrLen := len(sourceInterface) + if len(appendixValue) > resArrLen { + resArrLen = len(appendixValue) + } + resArr := make([]interface{}, resArrLen) + for i := range resArr { + var a, b interface{} + if i < len(appendixValue) { + b = appendixValue[i] + } + if i < len(sourceInterface) { + a = sourceInterface[i] + } + resArr[i] = MergeJSONWithMap(a, b) + } + source = resArr + case map[string]interface{}: + switch s := source.(type) { + case float64, bool, string: + return appendixValue + case nil: + source = make(map[string]interface{}) + return MergeJSONWithMap(source, appendixValue) + case map[string]interface{}: + for k, v := range appendixValue { + if k == "" { + return MergeJSONWithMap(s, v) + } + s[k] = MergeJSONWithMap(s[k], v) + } + source = s + case []interface{}: + for k, v := range appendixValue { + if k == "" { + return MergeJSONWithMap(s, v) + } + } + } + } + return source +} diff --git a/pkg/flow/adapter/transformation/common/storage/storage.go b/pkg/flow/adapter/transformation/common/storage/storage.go new file mode 100644 index 00000000..08b92c07 --- /dev/null +++ b/pkg/flow/adapter/transformation/common/storage/storage.go @@ -0,0 +1,69 @@ +package storage + +import ( + "sync" +) + +// Storage is a simple object that provides thread safe +// methods to read and write into a map. +type Storage struct { + data map[string]map[string]interface{} + mux sync.RWMutex +} + +// New returns an instance of Storage. +func New() *Storage { + return &Storage{ + data: make(map[string]map[string]interface{}), + mux: sync.RWMutex{}, + } +} + +// Set writes a value interface to a string key. 
+func (s *Storage) Set(eventID, key string, value interface{}) { + s.mux.Lock() + defer s.mux.Unlock() + if s.data[eventID] == nil { + s.data[eventID] = make(map[string]interface{}) + } + s.data[eventID][key] = value +} + +// Get reads value by a key. +func (s *Storage) Get(eventID string, key string) interface{} { + s.mux.RLock() + defer s.mux.RUnlock() + if s.data[eventID] == nil { + return nil + } + return s.data[eventID][key] +} + +// ListEventVariables returns the slice of variables created for EventID. +func (s *Storage) ListEventVariables(eventID string) []string { + s.mux.RLock() + defer s.mux.RUnlock() + list := []string{} + for k := range s.data[eventID] { + list = append(list, k) + } + return list +} + +// ListEventIDs returns the list of stored event IDs. +func (s *Storage) ListEventIDs() []string { + s.mux.RLock() + defer s.mux.RUnlock() + list := []string{} + for k := range s.data { + list = append(list, k) + } + return list +} + +// Flush removes variables by their parent event ID. +func (s *Storage) Flush(eventID string) { + s.mux.Lock() + defer s.mux.Unlock() + delete(s.data, eventID) +} diff --git a/pkg/flow/adapter/transformation/common/utils.go b/pkg/flow/adapter/transformation/common/utils.go new file mode 100644 index 00000000..f93aaf36 --- /dev/null +++ b/pkg/flow/adapter/transformation/common/utils.go @@ -0,0 +1,55 @@ +package common + +// ReadValue returns the source object item located at the requested path. 
// ReadValue returns the source object item located at the requested path.
//
// path is the nested map/array representation of a dotted path as built
// by convert.SliceToMap (a single chain of keys; array steps appear as
// []interface{} with the target at the last index). Returns nil when the
// path does not resolve. Note: each "break" below only exits the current
// switch case — it skips that path entry, it does not stop the loop.
func ReadValue(source interface{}, path map[string]interface{}) interface{} {
	var result interface{}
	for k, v := range path {
		switch value := v.(type) {
		case float64, bool, string:
			// Leaf step: read the scalar stored under k.
			sourceMap, ok := source.(map[string]interface{})
			if !ok {
				break
			}
			result = sourceMap[k]
		case []interface{}:
			if k != "" {
				// array is inside the object
				// {"foo":[{},{},{}]}
				sourceMap, ok := source.(map[string]interface{})
				if !ok {
					break
				}
				source, ok = sourceMap[k]
				if !ok {
					break
				}
			}
			// array is a root object
			// [{},{},{}]
			sourceArr, ok := source.([]interface{})
			if !ok {
				break
			}

			// Only the last populated index matters: SliceToMap places
			// the continuation of the path at the requested index.
			index := len(value) - 1
			if index >= len(sourceArr) {
				break
			}
			// NOTE(review): this assertion panics if the path element at
			// index is not a map — assumed safe because paths are always
			// built via SliceToMap; TODO confirm.
			result = ReadValue(sourceArr[index], value[index].(map[string]interface{}))
		case map[string]interface{}:
			if k == "" {
				// Empty-key marker: the current level is the target.
				result = source
				break
			}
			sourceMap, ok := source.(map[string]interface{})
			if !ok {
				break
			}
			if _, ok := sourceMap[k]; !ok {
				break
			}
			result = ReadValue(sourceMap[k], value)
		}
	}
	return result
}
+) + +// Pipeline is a set of Transformations that are +// sequentially applied to JSON data. +type Pipeline struct { + Transformers []transformer.Transformer + Storage *storage.Storage +} + +// register loads available Transformation into a named map. +func register() map[string]transformer.Transformer { + transformations := make(map[string]transformer.Transformer) + + add.Register(transformations) + delete.Register(transformations) + shift.Register(transformations) + store.Register(transformations) + parse.Register(transformations) + + return transformations +} + +// newPipeline loads available Transformations and creates a Pipeline. +func newPipeline(transformations []v1alpha1.Transform, storage *storage.Storage) (*Pipeline, error) { + availableTransformers := register() + pipeline := []transformer.Transformer{} + + for _, transformation := range transformations { + operation, exist := availableTransformers[transformation.Operation] + if !exist { + return nil, fmt.Errorf("transformation %q not found", transformation.Operation) + } + for _, kv := range transformation.Paths { + separator := defaultEventPathSeparator + if kv.Separator != "" { + separator = kv.Separator + } + transformer := operation.New(kv.Key, kv.Value, separator) + transformer.SetStorage(storage) + pipeline = append(pipeline, transformer) + } + } + + return &Pipeline{ + Transformers: pipeline, + Storage: storage, + }, nil +} + +// Apply applies Pipeline transformations. 
+func (p *Pipeline) apply(eventID string, data []byte, init bool) ([]byte, error) { + var err error + var errs []string + for _, v := range p.Transformers { + if init == v.InitStep() { + if data, err = v.Apply(eventID, data); err != nil { + errs = append(errs, err.Error()) + } + } + } + if len(errs) != 0 { + return data, fmt.Errorf(strings.Join(errs, ",")) + } + return data, nil +} diff --git a/pkg/flow/adapter/transformation/transformer/add/add.go b/pkg/flow/adapter/transformation/transformer/add/add.go new file mode 100644 index 00000000..c86df84a --- /dev/null +++ b/pkg/flow/adapter/transformation/transformer/add/add.go @@ -0,0 +1,141 @@ +package add + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/convert" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer" +) + +var _ transformer.Transformer = (*Add)(nil) + +// Add object implements Transformer interface. +type Add struct { + Path string + Value string + Separator string + + variables *storage.Storage +} + +// InitStep is used to figure out if this operation should +// run before main Transformations. For example, Store +// operation needs to run first to load all Pipeline variables. +var InitStep bool = false + +// operationName is used to identify this transformation. +var operationName string = "add" + +// Register adds this transformation to the map which will +// be used to create Transformation pipeline. +func Register(m map[string]transformer.Transformer) { + m[operationName] = &Add{} +} + +// SetStorage sets a shared Storage with Pipeline variables. +func (a *Add) SetStorage(storage *storage.Storage) { + a.variables = storage +} + +// InitStep returns "true" if this Transformation should run +// as init step. +func (a *Add) InitStep() bool { + return InitStep +} + +// New returns a new instance of Add object. 
// New returns a new instance of Add object configured with the target
// path, the value expression and the path separator, sharing this
// transformer's variable storage.
func (a *Add) New(key, value, separator string) transformer.Transformer {
	return &Add{
		Path:      key,
		Value:     value,
		Separator: separator,

		variables: a.variables,
	}
}

// Apply is a main method of Transformation that adds any type of
// variables into existing JSON. On any error the original data is
// returned unchanged together with the error.
func (a *Add) Apply(eventID string, data []byte) ([]byte, error) {
	// Build a nested map for the target path with the composed value at
	// the leaf, then merge it into the decoded event.
	input := convert.SliceToMap(strings.Split(a.Path, a.Separator), a.composeValue(eventID))
	var event interface{}
	if err := json.Unmarshal(data, &event); err != nil {
		return data, err
	}

	result := convert.MergeJSONWithMap(event, input)
	output, err := json.Marshal(result)
	if err != nil {
		return data, err
	}

	return output, nil
}

// retrieveVariable returns the stored value for key in this event's
// variables, or the key itself when no value is stored.
func (a *Add) retrieveVariable(eventID, key string) interface{} {
	if value := a.variables.Get(eventID, key); value != nil {
		return value
	}
	return key
}

// composeValue substitutes stored pipeline variables referenced in the
// configured Value expression. A variable surrounded by unescaped
// parentheses, e.g. "(varName)", is removed entirely (including the
// brackets) when the variable has no stored value; elsewhere the
// variable name is replaced in place by its stored value.
func (a *Add) composeValue(eventID string) interface{} {
	result := a.Value
	for _, key := range a.variables.ListEventVariables(eventID) {
		// limit the number of iterations to prevent the loop if
		// "add" variable is not updating the result (variable is not defined).
		variableKeysInResult := strings.Count(result, key)
		for i := 0; i <= variableKeysInResult; i++ {
			keyIndex := strings.Index(result, key)
			if keyIndex == -1 {
				continue
			}

			storedValue := a.retrieveVariable(eventID, key)

			if result == key {
				// The whole expression is a single variable: return the
				// stored value with its original type (not stringified).
				return storedValue
			}

			// Scan left/right from the key for the nearest surrounding
			// parentheses.
			openingBracketIndex := -1
			closingBracketIndex := -1
			for i := keyIndex; i >= 0; i-- {
				if string(result[i]) == "(" {
					openingBracketIndex = i
					break
				}
			}
			for i := keyIndex; i < len(result); i++ {
				if string(result[i]) == ")" {
					closingBracketIndex = i
					break
				}
			}

			// there is no brackets in the value
			if (openingBracketIndex == -1 || closingBracketIndex == -1) ||
				// brackets are screened with "\" symbol
				((openingBracketIndex > 0 && string(result[openingBracketIndex-1]) == "\\") ||
					string(result[closingBracketIndex-1]) == "\\") ||
				// brackets are not surrounding the key
				!(openingBracketIndex < keyIndex && closingBracketIndex >= keyIndex+len(key)) {
				result = fmt.Sprintf("%s%v%s", result[:keyIndex], storedValue, result[keyIndex+len(key):])
				continue
			}

			if storedValue == key {
				// stored value that equals the variable key means no stored value is available
				result = fmt.Sprintf("%s%s", result[:openingBracketIndex], result[closingBracketIndex+1:])
				continue
			}

			// Drop the surrounding brackets, then substitute the value.
			// NOTE(review): the index arithmetic below assumes exactly one
			// bracket pair directly around the key — TODO confirm nested
			// brackets are not a supported input.
			result = result[:openingBracketIndex] + result[openingBracketIndex+1:]
			result = result[:closingBracketIndex-1] + result[closingBracketIndex:]
			result = fmt.Sprintf("%s%v%s", result[:keyIndex-1], storedValue, result[keyIndex+len(key)-1:])
		}
	}
	return result
}
var _ transformer.Transformer = (*Delete)(nil)

// Delete object implements Transformer interface.
type Delete struct {
	// Path is the dotted path of the element to delete.
	Path string
	// Value, when set, restricts deletion to elements with this value.
	Value string
	// Type is declared but never populated by New.
	// NOTE(review): appears unused — TODO confirm before removing.
	Type string
	// Separator splits Path into its components.
	Separator string

	// variables is the pipeline-wide variable storage.
	variables *storage.Storage
}

// InitStep is used to figure out if this operation should
// run before main Transformations. For example, Store
// operation needs to run first to load all Pipeline variables.
var InitStep bool = false

// operationName is used to identify this transformation.
var operationName string = "delete"

// Register adds this transformation to the map which will
// be used to create Transformation pipeline.
func Register(m map[string]transformer.Transformer) {
	m[operationName] = &Delete{}
}

// SetStorage sets a shared Storage with Pipeline variables.
func (d *Delete) SetStorage(storage *storage.Storage) {
	d.variables = storage
}

// InitStep returns "true" if this Transformation should run
// as init step.
func (d *Delete) InitStep() bool {
	return InitStep
}

// New returns a new instance of Delete object sharing this
// transformer's variable storage.
func (d *Delete) New(key, value, separator string) transformer.Transformer {
	return &Delete{
		Path:      key,
		Value:     value,
		Separator: separator,

		variables: d.variables,
	}
}
// Apply is a main method of Transformation that removed any type of
// variables from existing JSON. On any error the original data is
// returned unchanged together with the error.
//
// NOTE(review): this mutates d.Value with the resolved variable, so the
// transformer instance carries state from one event into the next —
// TODO confirm instances are never shared across events.
func (d *Delete) Apply(eventID string, data []byte) ([]byte, error) {
	d.Value = d.retrieveString(eventID, d.Value)

	result, err := d.parse(data, "", "")
	if err != nil {
		return data, err
	}

	output, err := json.Marshal(result)
	if err != nil {
		return data, err
	}

	return output, nil
}

// retrieveString returns the stored string value for key in this
// event's variables, or the key itself when none is stored.
func (d *Delete) retrieveString(eventID, key string) string {
	if value := d.variables.Get(eventID, key); value != nil {
		if str, ok := value.(string); ok {
			return str
		}
	}
	return key
}

// parse walks the JSON value recursively, rebuilding it while omitting
// every element matched by filter. path accumulates the dotted location
// ("" at the root, ".foo[0].bar" below), key is the current map key.
func (d *Delete) parse(data interface{}, key, path string) (interface{}, error) {
	output := make(map[string]interface{})
	// TODO: keep only one filter call
	if d.filter(path, data) {
		return nil, nil
	}
	switch value := data.(type) {
	case []byte:
		// Raw payload at the root: decode, then recurse on the decoded
		// value.
		var m interface{}
		if err := json.Unmarshal(value, &m); err != nil {
			return nil, fmt.Errorf("unmarshal err: %v", err)
		}
		o, err := d.parse(m, key, path)
		if err != nil {
			return nil, fmt.Errorf("recursive call in []bytes case: %v", err)
		}
		return o, nil
	case float64, bool, string, nil:
		// Scalars survive as-is (filtering happened above).
		return value, nil
	case []interface{}:
		slice := []interface{}{}
		for i, v := range value {
			o, err := d.parse(v, key, fmt.Sprintf("%s[%d]", path, i))
			if err != nil {
				return nil, fmt.Errorf("recursive call in []interface case: %v", err)
			}
			slice = append(slice, o)
		}
		return slice, nil
	case map[string]interface{}:
		for k, v := range value {
			subPath := fmt.Sprintf("%s.%s", path, k)
			if d.filter(subPath, v) {
				// Matched: drop this entry from the rebuilt map.
				continue
			}
			o, err := d.parse(v, k, subPath)
			if err != nil {
				return nil, fmt.Errorf("recursive call in map[]interface case: %v", err)
			}
			output[k] = o
		}
	}

	return output, nil
}

// filter reports whether the element at path holding value matches the
// configured deletion criteria.
func (d *Delete) filter(path string, value interface{}) bool {
	switch {
	case d.Path != "" && d.Value != "":
		return d.filterPathAndValue(path, value)
	case d.Path != "":
		return d.filterPath(path)
	case d.Value != "":
		return d.filterValue(value)
	}
	// consider empty key and path as "delete any"
	return true
}

// filterPath matches the accumulated path against the configured one.
// NOTE(review): parse always builds paths with "." and "[i]", so a
// non-"." Separator can never match here — TODO confirm.
func (d *Delete) filterPath(path string) bool {
	return d.Separator+d.Path == path
}

// filterValue matches scalars against the configured Value using its
// canonical string form (floats via FormatFloat, bools via %t).
func (d *Delete) filterValue(value interface{}) bool {
	switch v := value.(type) {
	case string:
		return v == d.Value
	case float64:
		return d.Value == strconv.FormatFloat(v, 'f', -1, 64)
	case bool:
		return d.Value == fmt.Sprintf("%t", v)
	}
	return false
}

// filterPathAndValue requires both the path and the value to match.
func (d *Delete) filterPathAndValue(path string, value interface{}) bool {
	return d.filterPath(path) && d.filterValue(value)
}
// parseJSON decodes a JSON document held in a string value and returns
// the decoded object. It fails when the value is not a string or does
// not contain valid JSON.
func parseJSON(data interface{}) (interface{}, error) {
	raw, isString := data.(string)
	if !isString {
		return nil, fmt.Errorf("unable to cast the value to string type")
	}

	var decoded interface{}
	err := json.Unmarshal([]byte(raw), &decoded)
	if err != nil {
		return nil, fmt.Errorf("failed to unmarshal value: %w", err)
	}

	return decoded, nil
}
var _ transformer.Transformer = (*Shift)(nil)

// Shift object implements Transformer interface.
type Shift struct {
	// Path is the dotted path the value is moved from.
	Path string
	// NewPath is the dotted path the value is moved to.
	NewPath string
	// Value, when set, only shifts elements whose value matches.
	Value string
	// Separator splits Path/NewPath into their components.
	Separator string

	// variables is the pipeline-wide variable storage.
	variables *storage.Storage
}

// delimeter separates the source and destination paths inside the
// configured key ("old.path:new.path").
const delimeter string = ":"

// InitStep is used to figure out if this operation should
// run before main Transformations. For example, Store
// operation needs to run first to load all Pipeline variables.
var InitStep bool = false

// operationName is used to identify this transformation.
var operationName string = "shift"

// Register adds this transformation to the map which will
// be used to create Transformation pipeline.
func Register(m map[string]transformer.Transformer) {
	m[operationName] = &Shift{}
}

// SetStorage sets a shared Storage with Pipeline variables.
func (s *Shift) SetStorage(storage *storage.Storage) {
	s.variables = storage
}

// InitStep returns "true" if this Transformation should run
// as init step.
func (s *Shift) InitStep() bool {
	return InitStep
}

// New returns a new instance of Shift object. The key must have the
// form "source.path:destination.path".
//
// NOTE(review): a key without exactly one ":" makes New return nil,
// which a caller can append as a nil Transformer and dereference later —
// callers must check for nil.
func (s *Shift) New(key, value, separator string) transformer.Transformer {
	// doubtful scheme, review needed
	keys := strings.Split(key, delimeter)
	if len(keys) != 2 {
		return nil
	}
	return &Shift{
		Path:      keys[0],
		NewPath:   keys[1],
		Value:     value,
		Separator: separator,

		variables: s.variables,
	}
}
// Apply is a main method of Transformation that moves existing
// values to a new locations. When the value is missing, or a configured
// Value condition does not match, the event is passed through unchanged.
func (s *Shift) Apply(eventID string, data []byte) ([]byte, error) {
	oldPath := convert.SliceToMap(strings.Split(s.Path, s.Separator), "")

	var event interface{}
	if err := json.Unmarshal(data, &event); err != nil {
		return data, err
	}

	// Remove the value from its old location; newEvent is the event
	// without it.
	newEvent, value := extractValue(event, oldPath)
	if s.Value != "" {
		// Conditional shift: only move when the extracted value equals
		// the configured (or variable-resolved) value.
		if !equal(s.retrieveInterface(eventID, s.Value), value) {
			return data, nil
		}
	}
	if value == nil {
		return data, nil
	}

	newPath := convert.SliceToMap(strings.Split(s.NewPath, s.Separator), value)
	result := convert.MergeJSONWithMap(newEvent, newPath)
	output, err := json.Marshal(result)
	if err != nil {
		return data, err
	}

	return output, nil
}

// retrieveInterface returns the stored value for key in this event's
// variables, or the key itself when none is stored.
func (s *Shift) retrieveInterface(eventID, key string) interface{} {
	if value := s.variables.Get(eventID, key); value != nil {
		return value
	}
	return key
}

// extractValue removes the element addressed by path (a SliceToMap-style
// nested path) from source, returning the modified source map and the
// removed value (nil when the path does not resolve). Note: each "break"
// below only exits the switch case, not the loop, and source is mutated
// in place.
func extractValue(source interface{}, path map[string]interface{}) (map[string]interface{}, interface{}) {
	var ok bool
	var result interface{}
	sourceMap := make(map[string]interface{})
	for k, v := range path {
		switch value := v.(type) {
		case float64, bool, string:
			// Leaf step: take the value and delete the key.
			sourceMap, ok = source.(map[string]interface{})
			if !ok {
				break
			}
			result = sourceMap[k]
			delete(sourceMap, k)
		case []interface{}:
			if k != "" {
				// array is inside the object
				// {"foo":[{},{},{}]}
				sourceMap, ok = source.(map[string]interface{})
				if !ok {
					break
				}
				source, ok = sourceMap[k]
				if !ok {
					break
				}
			}
			// array is a root object
			// [{},{},{}]
			sourceArr, ok := source.([]interface{})
			if !ok {
				break
			}

			index := len(value) - 1
			if index >= len(sourceArr) {
				break
			}

			m, ok := value[index].(map[string]interface{})
			if ok {
				// Path continues below the array element: recurse.
				sourceArr[index], result = extractValue(sourceArr[index].(map[string]interface{}), m)
				sourceMap[k] = sourceArr
				break
			}
			// The array element itself is the target: remove it.
			result = sourceArr[index]
			sourceMap[k] = sourceArr[:index]
			// NOTE(review): the preceding assignment is a dead store and
			// this condition is always true after the bound check above;
			// the append also aliases sourceArr's backing array — TODO
			// review for simplification.
			if len(sourceArr) > index {
				sourceMap[k] = append(sourceArr[:index], sourceArr[index+1:]...)
			}
		case map[string]interface{}:
			if k == "" {
				// Empty-key marker: the current level is the target.
				result = source
				break
			}
			sourceMap, ok = source.(map[string]interface{})
			if !ok {
				break
			}
			if _, ok := sourceMap[k]; !ok {
				break
			}
			sourceMap[k], result = extractValue(sourceMap[k], value)
		case nil:
			sourceMap[k] = nil
		}
	}
	return sourceMap, result
}

// equal compares two values of the JSON scalar types (string, bool,
// float64); any other type, or a type mismatch, compares unequal.
func equal(a, b interface{}) bool {
	switch value := b.(type) {
	case string:
		v, ok := a.(string)
		if ok && v == value {
			return true
		}
	case bool:
		v, ok := a.(bool)
		if ok && v == value {
			return true
		}
	case float64:
		v, ok := a.(float64)
		if ok && v == value {
			return true
		}
	}
	return false
}
// Register adds this transformation to the map which will
// be used to create Transformation pipeline.
func Register(m map[string]transformer.Transformer) {
	m[operationName] = &Store{}
}

// SetStorage sets a shared Storage with Pipeline variables.
func (s *Store) SetStorage(storage *storage.Storage) {
	s.variables = storage
}

// InitStep returns "true" if this Transformation should run
// as init step.
func (s *Store) InitStep() bool {
	return InitStep
}

// New returns a new instance of Store object sharing this
// transformer's variable storage. key is the variable name, value is
// the source path to read.
func (s *Store) New(key, value, separator string) transformer.Transformer {
	return &Store{
		Path:      key,
		Value:     value,
		Separator: separator,

		variables: s.variables,
	}
}

// Apply is a main method of Transformation that stores JSON values
// into variables that can be used by other Transformations in a pipeline.
// The event data itself is always passed through unchanged.
func (s *Store) Apply(eventID string, data []byte) ([]byte, error) {
	// Note the role reversal relative to other transformers: Value holds
	// the source path, Path holds the variable name.
	path := convert.SliceToMap(strings.Split(s.Value, s.Separator), "")

	var event interface{}
	if err := json.Unmarshal(data, &event); err != nil {
		return data, err
	}

	value := common.ReadValue(event, path)

	s.variables.Set(eventID, s.Path, value)

	return data, nil
}
// Transformer is an interface that contains common methods
// to work with JSON data.
type Transformer interface {
	// New returns a transformer instance configured with the given key,
	// value and path separator.
	New(key, value, separator string) Transformer
	// Apply runs the transformation on the serialized JSON data for the
	// given event and returns the (possibly modified) data.
	Apply(eventID string, data []byte) ([]byte, error)
	// SetStorage attaches the pipeline-wide variable storage.
	SetStorage(*storage.Storage)
	// InitStep reports whether the transformer must run in the init
	// stage, before the main transformations.
	InitStep() bool
}
	// Sink defines the target sink for the events. If no Sink is defined the
	// events are replied back to the sender.
	Sink string `envconfig:"K_SINK"`
}

// NewAdapter adapter implementation. Panics (by design, during startup)
// when the CloudEvents replier cannot be created.
func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter {
	logger := logging.FromContext(ctx)

	mt := &pkgadapter.MetricTag{
		ResourceGroup: flow.XMLToJSONTransformationResource.String(),
		Namespace:     envAcc.GetNamespace(),
		Name:          envAcc.GetName(),
	}

	metrics.MustRegisterEventProcessingStatsView()

	env := envAcc.(*envAccessor)

	replier, err := targetce.New(env.Component, logger.Named("replier"),
		targetce.ReplierWithStatefulHeaders(env.BridgeIdentifier),
		targetce.ReplierWithStaticResponseType(v1alpha1.EventTypeXMLToJSONGenericResponse),
		targetce.ReplierWithPayloadPolicy(targetce.PayloadPolicy(env.CloudEventPayloadPolicy)))
	if err != nil {
		logger.Panicf("Error creating CloudEvents replier: %v", err)
	}

	return &Adapter{
		sink:     env.Sink,
		replier:  replier,
		ceClient: ceClient,
		logger:   logger,

		mt: mt,
		sr: metrics.MustNewEventProcessingStatsReporter(mt),
	}
}

var _ pkgadapter.Adapter = (*Adapter)(nil)

// Adapter converts the XML payload of incoming CloudEvents to JSON.
type Adapter struct {
	// sink is the optional destination URI; empty means reply-mode.
	sink string
	// replier builds response and error CloudEvents.
	replier *targetce.Replier
	// ceClient receives and sends CloudEvents.
	ceClient cloudevents.Client
	logger   *zap.SugaredLogger

	// mt tags metrics with the owning resource.
	mt *pkgadapter.MetricTag
	// sr reports event processing statistics.
	sr *metrics.EventProcessingStatsReporter
}

// Start is a blocking function and will return if an error occurs
// or the context is cancelled.
+func (a *Adapter) Start(ctx context.Context) error { + a.logger.Info("Starting XMLToJSONTransformation Adapter") + ctx = pkgadapter.ContextWithMetricTag(ctx, a.mt) + return a.ceClient.StartReceiver(ctx, a.dispatch) +} + +func (a *Adapter) dispatch(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + if !isValidXML(event.Data()) { + return a.replier.Error(&event, targetce.ErrorCodeRequestValidation, + errors.New("invalid XML"), nil) + } + + xml := bytes.NewReader(event.Data()) + jsn, err := xj.Convert(xml) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + readBuf, err := io.ReadAll(jsn) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + if err := event.SetData(cloudevents.ApplicationJSON, readBuf); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + if a.sink != "" { + if result := a.ceClient.Send(ctx, event); !cloudevents.IsACK(result) { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + return nil, cloudevents.ResultACK + } + + return &event, cloudevents.ResultACK +} + +func isValidXML(data []byte) bool { + return xml.Unmarshal(data, new(interface{})) == nil +} diff --git a/pkg/flow/adapter/xslttransformation/adapter.go b/pkg/flow/adapter/xslttransformation/adapter.go new file mode 100644 index 00000000..8392ef8a --- /dev/null +++ b/pkg/flow/adapter/xslttransformation/adapter.go @@ -0,0 +1,153 @@ +//go:build !noclibs + +package xslttransformation + +import ( + "context" + "errors" + "fmt" + "runtime" + + xslt "github.com/wamuir/go-xslt" + "go.uber.org/zap" + + cloudevents "github.com/cloudevents/sdk-go/v2" + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/flow" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + "github.com/zeiss/typhoon/pkg/metrics" + targetce 
var _ pkgadapter.Adapter = (*xsltTransformAdapter)(nil)

// xsltTransformAdapter transforms XML CloudEvent payloads using XSLT.
type xsltTransformAdapter struct {
	// defaultXSLT is the precompiled stylesheet from the environment;
	// nil when no default XSLT was configured.
	defaultXSLT *xslt.Stylesheet
	// xsltOverride allows events to carry their own XSLT.
	xsltOverride bool

	replier  *targetce.Replier
	ceClient cloudevents.Client
	logger   *zap.SugaredLogger
	// sink is the optional destination URI; empty means reply-mode.
	sink string

	mt *pkgadapter.MetricTag
	sr *metrics.EventProcessingStatsReporter
}

// NewTarget adapter implementation. Panics (by design, during startup)
// on invalid configuration, a failing replier constructor, or an
// invalid default XSLT stylesheet.
func NewTarget(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter {
	logger := logging.FromContext(ctx)

	mt := &pkgadapter.MetricTag{
		ResourceGroup: flow.XSLTTransformationResource.String(),
		Namespace:     envAcc.GetNamespace(),
		Name:          envAcc.GetName(),
	}

	metrics.MustRegisterEventProcessingStatsView()

	env := envAcc.(*envAccessor)

	if err := env.validate(); err != nil {
		logger.Panicf("Configuration error: %v", err)
	}

	replier, err := targetce.New(env.Component, logger.Named("replier"),
		targetce.ReplierWithStatefulHeaders(env.BridgeIdentifier),
		targetce.ReplierWithStaticDataContentType(cloudevents.ApplicationXML),
		targetce.ReplierWithStaticErrorDataContentType(*cloudevents.StringOfApplicationJSON()),
		targetce.ReplierWithPayloadPolicy(targetce.PayloadPolicy(targetce.PayloadPolicyAlways)))
	if err != nil {
		logger.Panicf("Error creating CloudEvents replier: %v", err)
	}

	adapter := &xsltTransformAdapter{
		xsltOverride: env.AllowXSLTOverride,

		replier:  replier,
		ceClient: ceClient,
		logger:   logger,
		sink:     env.Sink,

		mt: mt,
		sr: metrics.MustNewEventProcessingStatsReporter(mt),
	}

	if env.XSLT != "" {
		adapter.defaultXSLT, err = xslt.NewStylesheet([]byte(env.XSLT))
		if err != nil {
			logger.Panicf("XSLT validation error: %v", err)
		}

		// The stylesheet wraps C resources; release them when the
		// adapter's stylesheet is garbage collected.
		runtime.SetFinalizer(adapter.defaultXSLT, (*xslt.Stylesheet).Close)
	}

	return adapter
}

// Start is a blocking function and will return if an error occurs
// or the context is cancelled.
+func (a *xsltTransformAdapter) Start(ctx context.Context) error { + a.logger.Info("Starting XSLT transformer") + ctx = pkgadapter.ContextWithMetricTag(ctx, a.mt) + return a.ceClient.StartReceiver(ctx, a.dispatch) +} + +func (a *xsltTransformAdapter) dispatch(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + isStructuredTransform := event.Type() == v1alpha1.EventTypeXSLTTransformation + if isStructuredTransform && !a.xsltOverride { + return a.replier.Error(&event, targetce.ErrorCodeRequestValidation, + errors.New("it is not allowed to override XSLT per CloudEvent"), nil) + } + + isXML := event.DataMediaType() == cloudevents.ApplicationXML + + var style *xslt.Stylesheet + var xmlin []byte + var err error + + switch { + case isStructuredTransform: + req := &XSLTTransformationStructuredRequest{} + if err := event.DataAs(req); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + + xmlin = []byte(req.XML) + style, err = xslt.NewStylesheet([]byte(req.XSLT)) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + defer style.Close() + + case isXML: + xmlin = event.DataEncoded + style = a.defaultXSLT + + default: + return a.replier.Error(&event, targetce.ErrorCodeRequestValidation, + errors.New("unexpected type or media-type for the incoming event"), nil) + } + + res, err := style.Transform(xmlin) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestValidation, + fmt.Errorf("error processing XML with XSLT: %v", err), nil) + } + + if a.sink != "" { + event.SetType(event.Type() + ".response") + if err := event.SetData(cloudevents.ApplicationXML, res); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + if result := a.ceClient.Send(ctx, event); !cloudevents.IsACK(result) { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, "sending the cloudevent to the sink") 
+ } + return nil, cloudevents.ResultACK + } + + return a.replier.Ok(&event, res, targetce.ResponseWithDataContentType(cloudevents.ApplicationXML)) +} diff --git a/pkg/flow/adapter/xslttransformation/config.go b/pkg/flow/adapter/xslttransformation/config.go new file mode 100644 index 00000000..51fa4d75 --- /dev/null +++ b/pkg/flow/adapter/xslttransformation/config.go @@ -0,0 +1,35 @@ +//go:build !noclibs + +package xslttransformation + +import ( + "errors" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" +) + +// EnvAccessorCtor for configuration parameters +func EnvAccessorCtor() pkgadapter.EnvConfigAccessor { + return &envAccessor{} +} + +type envAccessor struct { + pkgadapter.EnvConfig + // XSLT document that will be used by default for transformation. + XSLT string `envconfig:"XSLTTRANSFORMATION_XSLT"` + // If set to true, enables consuming structured CloudEvents that include + // fields for the XML and XSLT field. + AllowXSLTOverride bool `envconfig:"XSLTTRANSFORMATION_ALLOW_XSLT_OVERRIDE" required:"true"` + // BridgeIdentifier is the name of the bridge workflow this target is part of + BridgeIdentifier string `envconfig:"EVENTS_BRIDGE_IDENTIFIER"` + // Sink defines the target sink for the events. If no Sink is defined the + // events are replied back to the sender. + Sink string `envconfig:"K_SINK"` +} + +func (e *envAccessor) validate() error { + if !e.AllowXSLTOverride && e.XSLT == "" { + return errors.New("if XSLT cannot be overridden by CloudEvent payloads, configured XSLT cannot be empty") + } + return nil +} diff --git a/pkg/flow/adapter/xslttransformation/types.go b/pkg/flow/adapter/xslttransformation/types.go new file mode 100644 index 00000000..34bd9166 --- /dev/null +++ b/pkg/flow/adapter/xslttransformation/types.go @@ -0,0 +1,10 @@ +//go:build !noclibs + +package xslttransformation + +// XSLTTransformationStructuredRequest contains an opinionated structure +// that informs both the XML and XSLT to transform. 
+type XSLTTransformationStructuredRequest struct { + XML string `json:"xml"` + XSLT string `json:"xslt,omitempty"` +} diff --git a/pkg/flow/reconciler/jqtransformation/adapter.go b/pkg/flow/reconciler/jqtransformation/adapter.go new file mode 100644 index 00000000..66ad3143 --- /dev/null +++ b/pkg/flow/reconciler/jqtransformation/adapter.go @@ -0,0 +1,66 @@ +package jqtransformation + +import ( + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envQuery = "JQ_QUERY" + envEventsPayloadPolicy = "EVENTS_PAYLOAD_POLICY" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/jqtransformation-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. +func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.JQTransformation) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. 
+func MakeAppEnv(o *v1alpha1.JQTransformation) []corev1.EnvVar { + env := []corev1.EnvVar{ + { + Name: envQuery, + Value: o.Spec.Query, + }, + { + Name: common.EnvBridgeID, + Value: common.GetStatefulBridgeID(o), + }, + } + + if o.Spec.EventOptions != nil && o.Spec.EventOptions.PayloadPolicy != nil { + env = append(env, corev1.EnvVar{ + Name: envEventsPayloadPolicy, + Value: string(*o.Spec.EventOptions.PayloadPolicy), + }) + } + + return env +} diff --git a/pkg/flow/reconciler/jqtransformation/controller.go b/pkg/flow/reconciler/jqtransformation/controller.go new file mode 100644 index 00000000..5d974a77 --- /dev/null +++ b/pkg/flow/reconciler/jqtransformation/controller.go @@ -0,0 +1,52 @@ +package jqtransformation + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/jqtransformation" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/jqtransformation" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.JQTransformation)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. 
+ adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.JQTransformation]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().JQTransformations, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/flow/reconciler/jqtransformation/reconciler.go b/pkg/flow/reconciler/jqtransformation/reconciler.go new file mode 100644 index 00000000..7a5c8bf9 --- /dev/null +++ b/pkg/flow/reconciler/jqtransformation/reconciler.go @@ -0,0 +1,30 @@ +package jqtransformation + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/jqtransformation" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. +type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.JQTransformation, listersv1alpha1.JQTransformationNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. 
+func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.JQTransformation) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/flow/reconciler/synchronizer/adapter.go b/pkg/flow/reconciler/synchronizer/adapter.go new file mode 100644 index 00000000..70d55925 --- /dev/null +++ b/pkg/flow/reconciler/synchronizer/adapter.go @@ -0,0 +1,67 @@ +package synchronizer + +import ( + "strconv" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/synchronizer-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. +func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.Synchronizer) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. 
+func MakeAppEnv(o *v1alpha1.Synchronizer) []corev1.EnvVar { + env := []corev1.EnvVar{ + { + Name: common.EnvBridgeID, + Value: common.GetStatefulBridgeID(o), + }, + { + Name: "CORRELATION_KEY", + Value: o.Spec.CorrelationKey.Attribute, + }, + { + Name: "RESPONSE_WAIT_TIMEOUT", + Value: o.Spec.Response.Timeout.String(), + }, + } + + if o.Spec.CorrelationKey.Length != 0 { + env = append(env, corev1.EnvVar{ + Name: "CORRELATION_KEY_LENGTH", + Value: strconv.Itoa(o.Spec.CorrelationKey.Length), + }) + } + + return env +} diff --git a/pkg/flow/reconciler/synchronizer/controller.go b/pkg/flow/reconciler/synchronizer/controller.go new file mode 100644 index 00000000..e0112be5 --- /dev/null +++ b/pkg/flow/reconciler/synchronizer/controller.go @@ -0,0 +1,52 @@ +package synchronizer + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/synchronizer" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/synchronizer" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.Synchronizer)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. 
+ adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.Synchronizer]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().Synchronizers, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/flow/reconciler/synchronizer/reconciler.go b/pkg/flow/reconciler/synchronizer/reconciler.go new file mode 100644 index 00000000..64670f3b --- /dev/null +++ b/pkg/flow/reconciler/synchronizer/reconciler.go @@ -0,0 +1,30 @@ +package synchronizer + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/synchronizer" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. +type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.Synchronizer, listersv1alpha1.SynchronizerNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. 
+func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.Synchronizer) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/flow/reconciler/transformation/adapter.go b/pkg/flow/reconciler/transformation/adapter.go new file mode 100644 index 00000000..c935d5f5 --- /dev/null +++ b/pkg/flow/reconciler/transformation/adapter.go @@ -0,0 +1,69 @@ +package transformation + +import ( + "encoding/json" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envTransformationCtx = "TRANSFORMATION_CONTEXT" + envTransformationData = "TRANSFORMATION_DATA" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/transformation-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. 
+func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.Transformation) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(o *v1alpha1.Transformation) []corev1.EnvVar { + var trnContext string + if b, err := json.Marshal(o.Spec.Context); err == nil { + trnContext = string(b) + } + + var trnData string + if b, err := json.Marshal(o.Spec.Data); err == nil { + trnData = string(b) + } + + return []corev1.EnvVar{ + { + Name: envTransformationCtx, + Value: trnContext, + }, + { + Name: envTransformationData, + Value: trnData, + }, + } +} diff --git a/pkg/flow/reconciler/transformation/controller.go b/pkg/flow/reconciler/transformation/controller.go new file mode 100644 index 00000000..7209565c --- /dev/null +++ b/pkg/flow/reconciler/transformation/controller.go @@ -0,0 +1,52 @@ +package transformation + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/transformation" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/transformation" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.Transformation)(nil) + app := 
common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. + adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.Transformation]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().Transformations, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/flow/reconciler/transformation/reconciler.go b/pkg/flow/reconciler/transformation/reconciler.go new file mode 100644 index 00000000..1efc9fc9 --- /dev/null +++ b/pkg/flow/reconciler/transformation/reconciler.go @@ -0,0 +1,30 @@ +package transformation + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/transformation" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. +type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.Transformation, listersv1alpha1.TransformationNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. 
+func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.Transformation) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/flow/reconciler/xmltojsontransformation/adapter.go b/pkg/flow/reconciler/xmltojsontransformation/adapter.go new file mode 100644 index 00000000..40721858 --- /dev/null +++ b/pkg/flow/reconciler/xmltojsontransformation/adapter.go @@ -0,0 +1,61 @@ +package xmltojsontransformation + +import ( + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envEventsPayloadPolicy = "EVENTS_PAYLOAD_POLICY" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/xmltojsontransformation-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. 
+func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.XMLToJSONTransformation) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(o *v1alpha1.XMLToJSONTransformation) []corev1.EnvVar { + env := []corev1.EnvVar{ + { + Name: common.EnvBridgeID, + Value: common.GetStatefulBridgeID(o), + }, + } + + if o.Spec.EventOptions != nil && o.Spec.EventOptions.PayloadPolicy != nil { + env = append(env, corev1.EnvVar{ + Name: envEventsPayloadPolicy, + Value: string(*o.Spec.EventOptions.PayloadPolicy), + }) + } + + return env +} diff --git a/pkg/flow/reconciler/xmltojsontransformation/controller.go b/pkg/flow/reconciler/xmltojsontransformation/controller.go new file mode 100644 index 00000000..b8ca612c --- /dev/null +++ b/pkg/flow/reconciler/xmltojsontransformation/controller.go @@ -0,0 +1,52 @@ +package xmltojsontransformation + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/xmltojsontransformation" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/xmltojsontransformation" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := 
(*v1alpha1.XMLToJSONTransformation)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. + adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.XMLToJSONTransformation]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().XMLToJSONTransformations, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/flow/reconciler/xmltojsontransformation/reconciler.go b/pkg/flow/reconciler/xmltojsontransformation/reconciler.go new file mode 100644 index 00000000..16db289e --- /dev/null +++ b/pkg/flow/reconciler/xmltojsontransformation/reconciler.go @@ -0,0 +1,30 @@ +package xmltojsontransformation + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/xmltojsontransformation" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. +type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.XMLToJSONTransformation, listersv1alpha1.XMLToJSONTransformationNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. 
+func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.XMLToJSONTransformation) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/flow/reconciler/xslttransformation/adapter.go b/pkg/flow/reconciler/xslttransformation/adapter.go new file mode 100644 index 00000000..ad9e1541 --- /dev/null +++ b/pkg/flow/reconciler/xslttransformation/adapter.go @@ -0,0 +1,65 @@ +package xslttransformation + +import ( + "strconv" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envXSLT = "XSLTTRANSFORMATION_XSLT" + envAllowXSLTOverride = "XSLTTRANSFORMATION_ALLOW_XSLT_OVERRIDE" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/xslttransformation-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. 
+func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.XSLTTransformation) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(o *v1alpha1.XSLTTransformation) []corev1.EnvVar { + env := []corev1.EnvVar{ + *o.Spec.XSLT.ToEnvironmentVariable(envXSLT), + { + Name: common.EnvBridgeID, + Value: common.GetStatefulBridgeID(o), + }, + } + + if o.Spec.AllowPerEventXSLT != nil { + env = append(env, corev1.EnvVar{ + Name: envAllowXSLTOverride, + Value: strconv.FormatBool(*o.Spec.AllowPerEventXSLT), + }) + } + + return env +} diff --git a/pkg/flow/reconciler/xslttransformation/controller.go b/pkg/flow/reconciler/xslttransformation/controller.go new file mode 100644 index 00000000..d78cccbf --- /dev/null +++ b/pkg/flow/reconciler/xslttransformation/controller.go @@ -0,0 +1,52 @@ +package xslttransformation + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/xslttransformation" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/xslttransformation" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.XSLTTransformation)(nil) + 
app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. + adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.XSLTTransformation]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().XSLTTransformations, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/flow/reconciler/xslttransformation/reconciler.go b/pkg/flow/reconciler/xslttransformation/reconciler.go new file mode 100644 index 00000000..128533a9 --- /dev/null +++ b/pkg/flow/reconciler/xslttransformation/reconciler.go @@ -0,0 +1,30 @@ +package xslttransformation + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/xslttransformation" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. +type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.XSLTTransformation, listersv1alpha1.XSLTTransformationNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. 
+func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.XSLTTransformation) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/sources/adapter/cloudeventssource/adapter.go b/pkg/sources/adapter/cloudeventssource/adapter.go new file mode 100644 index 00000000..9bc96841 --- /dev/null +++ b/pkg/sources/adapter/cloudeventssource/adapter.go @@ -0,0 +1,81 @@ +package cloudeventssource + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cehttp "github.com/cloudevents/sdk-go/v2/protocol/http" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/adapter/fs" + "github.com/zeiss/typhoon/pkg/apis/sources" + "github.com/zeiss/typhoon/pkg/sources/adapter/cloudeventssource/ratelimiter" +) + +// NewAdapter satisfies pkgadapter.AdapterConstructor. 
+func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: sources.CloudEventsSourceResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + env := envAcc.(*envAccessor) + + cfw, err := fs.NewCachedFileWatcher(logger) + if err != nil { + logger.Panicw("Could not create a file watcher", zap.Error(err)) + } + + for _, as := range env.BasicAuths { + if err := cfw.Add(as.MountedValueFile); err != nil { + logger.Panicw( + fmt.Sprintf("Authentication secret at %q could not be watched", as.MountedValueFile), + zap.Error(err)) + } + } + + ceh := &cloudEventsHandler{ + basicAuths: env.BasicAuths, + + cfw: cfw, + ceClient: ceClient, + logger: logger, + mt: mt, + } + + // prepare CE server options + options := []cehttp.Option{} + + if env.Path != "" { + options = append(options, cehttp.WithPath(env.Path)) + } + if len(env.BasicAuths) != 0 { + options = append(options, cehttp.WithMiddleware(ceh.handleAuthentication)) + } + + if env.RequestsPerSecond != 0 { + rl, err := ratelimiter.New(env.RequestsPerSecond) + if err != nil { + logger.Panicw("Could not create rate limiter", zap.Error(err)) + } + options = append(options, cehttp.WithRateLimiter(rl)) + } + + ceServer, err := cloudevents.NewClientHTTP(options...) 
+ if err != nil { + logger.Panicw("Error creating CloudEvents client", zap.Error(err)) + } + + ceh.ceServer = ceServer + return ceh +} + +var _ pkgadapter.Adapter = (*cloudEventsHandler)(nil) diff --git a/pkg/sources/adapter/cloudeventssource/cloudevents.go b/pkg/sources/adapter/cloudeventssource/cloudevents.go new file mode 100644 index 00000000..1c93317c --- /dev/null +++ b/pkg/sources/adapter/cloudeventssource/cloudevents.go @@ -0,0 +1,88 @@ +package cloudeventssource + +import ( + "context" + "crypto/sha256" + "crypto/subtle" + "fmt" + "net/http" + + "go.uber.org/zap" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + + "github.com/zeiss/typhoon/pkg/adapter/fs" +) + +type cloudEventsHandler struct { + basicAuths KeyMountedValues + + cfw fs.CachedFileWatcher + ceServer cloudevents.Client + ceClient cloudevents.Client + logger *zap.SugaredLogger + mt *pkgadapter.MetricTag +} + +// Start implements adapter.Adapter. 
+func (h *cloudEventsHandler) Start(ctx context.Context) error { + h.cfw.Start(ctx) + return h.ceServer.StartReceiver(ctx, h.handle) +} + +func (h *cloudEventsHandler) handle(ctx context.Context, e event.Event) protocol.Result { + err := e.Validate() + if err != nil { + h.logger.Errorw("Incoming CloudEvent is not valid", zap.Error(err)) + return protocol.ResultNACK + } + + result := h.ceClient.Send(ctx, e) + if !cloudevents.IsACK(result) { + h.logger.Errorw("Could not send CloudEvent", zap.Error(result)) + } + + return result +} + +// code based on VMware's VEBA's webhook: +// https://github.com/vmware-samples/vcenter-event-broker-appliance/blob/e91e4bd8a17dad6ce4fe370c42a15694c03dac88/vmware-event-router/internal/provider/webhook/webhook.go#L167-L189 +func (h *cloudEventsHandler) handleAuthentication(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + username, password, ok := r.BasicAuth() + + if ok { + // reduce brute-force guessing attacks with constant-time comparisons + usernameHash := sha256.Sum256([]byte(username)) + passwordHash := sha256.Sum256([]byte(password)) + + for _, kv := range h.basicAuths { + p, err := h.cfw.GetContent(kv.MountedValueFile) + if err != nil { + h.logger.Errorw( + fmt.Sprintf("Could not retrieve password for user %q", kv.Key), + zap.Error(err)) + continue + } + + expectedUsernameHash := sha256.Sum256([]byte(kv.Key)) + expectedPasswordHash := sha256.Sum256(p) + + usernameMatch := subtle.ConstantTimeCompare(usernameHash[:], expectedUsernameHash[:]) == 1 + passwordMatch := subtle.ConstantTimeCompare(passwordHash[:], expectedPasswordHash[:]) == 1 + + if usernameMatch && passwordMatch { + next.ServeHTTP(w, r) + return + } + } + } + + w.Header().Set("WWW-Authenticate", `Basic realm="restricted", charset="UTF-8"`) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + }) +} diff --git a/pkg/sources/adapter/cloudeventssource/env.go b/pkg/sources/adapter/cloudeventssource/env.go 
new file mode 100644 index 00000000..245a63aa --- /dev/null +++ b/pkg/sources/adapter/cloudeventssource/env.go @@ -0,0 +1,33 @@ +package cloudeventssource + +import ( + "encoding/json" + + cereconciler "github.com/zeiss/typhoon/pkg/sources/reconciler/cloudeventssource" + "knative.dev/eventing/pkg/adapter/v2" +) + +// NewEnvConfig satisfies pkgadapter.EnvConfigConstructor. +func NewEnvConfig() adapter.EnvConfigAccessor { + return &envAccessor{} +} + +// KeyMountedValues contains a set of file mounted values +// by their name. +type KeyMountedValues []cereconciler.KeyMountedValue + +// Decode an array of KeyMountedValues +func (is *KeyMountedValues) Decode(value string) error { + if err := json.Unmarshal([]byte(value), is); err != nil { + return err + } + return nil +} + +type envAccessor struct { + adapter.EnvConfig + + Path string `envconfig:"CLOUDEVENTS_PATH"` + BasicAuths KeyMountedValues `envconfig:"CLOUDEVENTS_BASICAUTH_CREDENTIALS"` + RequestsPerSecond uint64 `envconfig:"CLOUDEVENTS_RATELIMITER_RPS"` +} diff --git a/pkg/sources/adapter/cloudeventssource/ratelimiter/ratelimiter.go b/pkg/sources/adapter/cloudeventssource/ratelimiter/ratelimiter.go new file mode 100644 index 00000000..3cb25eaf --- /dev/null +++ b/pkg/sources/adapter/cloudeventssource/ratelimiter/ratelimiter.go @@ -0,0 +1,45 @@ +package ratelimiter + +import ( + "context" + "net/http" + "time" + + cehttp "github.com/cloudevents/sdk-go/v2/protocol/http" + "github.com/sethvargo/go-limiter" + "github.com/sethvargo/go-limiter/memorystore" +) + +const ( + // token to be used globally for every request. + globalToken = "global" +) + +type rateLimiter struct { + store limiter.Store +} + +// New creates a new rate limiter. 
+func New(rps uint64) (cehttp.RateLimiter, error) { + if store, err := memorystore.New(&memorystore.Config{ + Tokens: rps, + Interval: time.Second, + }); err != nil { + return nil, err + } else { + return &rateLimiter{ + store: store, + }, nil + } +} + +// Allow checks if a request is allowed to pass the rate limiter filter. +func (rl *rateLimiter) Allow(ctx context.Context, _ *http.Request) (ok bool, reset uint64, err error) { + _, _, reset, ok, err = rl.store.Take(ctx, globalToken) + return ok, reset, err +} + +// Close cleans up rate limiter resources. +func (rl *rateLimiter) Close(ctx context.Context) error { + return rl.store.Close(ctx) +} diff --git a/pkg/sources/adapter/common/backoff.go b/pkg/sources/adapter/common/backoff.go new file mode 100644 index 00000000..880ba063 --- /dev/null +++ b/pkg/sources/adapter/common/backoff.go @@ -0,0 +1,107 @@ +package common + +import ( + "context" + "math" + "sync/atomic" + "time" +) + +// default values for backoff +const ( + expFactor = 2 + + defaultMinBackoff = 1 * time.Second + defaultMaxBackoff = 32 * time.Second +) + +// Backoff provides a simple exponential backoff mechanism. +type Backoff struct { + step *int32 + factor float64 + min, max time.Duration +} + +// RunFunc is a user function that polls data from a source and sends it as a +// CloudEvent to a sink. +// RunFunc must return (bool, error) values where bool is true if the poll +// backoff duration must be reset, and error is the result of the function's +// execution. +type RunFunc func(context.Context) (bool /*reset*/, error /*exit*/) + +// NewBackoff accepts optional values for minimum and maximum wait period and +// returns a new instance of Backoff. 
func NewBackoff(args ...time.Duration) *Backoff {
	backoff := &Backoff{
		step:   new(int32),
		factor: expFactor,
		min:    defaultMinBackoff,
		max:    defaultMaxBackoff,
	}

	// One argument overrides the minimum, two override (min, max).
	// Inconsistent values (min > max) are silently ignored and the
	// defaults are kept.
	switch len(args) {
	case 1:
		if args[0] <= backoff.max {
			backoff.min = args[0]
		}
	case 2:
		if args[0] <= args[1] {
			backoff.min = args[0]
			backoff.max = args[1]
		}
	}

	return backoff
}

// Duration returns the exponential backoff duration calculated for the current step.
// The first call returns min; subsequent calls grow by factor until capped at
// max. Note that the step counter is NOT advanced once the cap is reached.
func (b *Backoff) Duration() time.Duration {
	dur := time.Duration(float64(b.min)*math.Pow(b.factor, float64(atomic.LoadInt32(b.step))) - float64(b.min))

	switch {
	case dur < b.min:
		atomic.AddInt32(b.step, 1)
		return b.min
	case dur > b.max:
		return b.max
	default:
		atomic.AddInt32(b.step, 1)
		return dur
	}
}

// Reset sets step counter to zero.
func (b *Backoff) Reset() {
	atomic.StoreInt32(b.step, 0)
}

// Run is a blocking function that executes RunFunc until stopCh receives a
// value or fn returns an error.
func (b *Backoff) Run(stopCh <-chan struct{}, fn RunFunc) error {
	// Zero initial delay so fn runs immediately on the first iteration.
	timer := time.NewTimer(0)
	defer timer.Stop()

	// FIXME(antoineco): never canceled until stopCh receives a value,
	// after which the fn is never invoked again, so ctx does effectively
	// nothing.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for {
		select {
		case <-stopCh:
			return nil

		case <-timer.C:
			reset, err := fn(ctx)
			if err != nil {
				return err
			}

			if reset {
				b.Reset()
			}
			timer.Reset(b.Duration())
		}
	}
}
diff --git a/pkg/sources/adapter/common/controller/controller.go b/pkg/sources/adapter/common/controller/controller.go new file mode 100644 index 00000000..9518c897 --- /dev/null +++ b/pkg/sources/adapter/common/controller/controller.go
// Package controller contains helpers shared between controllers embedded in
// source adapters.
package controller

import "knative.dev/pkg/controller"

// Opts returns a callback function that sets the controller's agent name and
// configures the reconciler to skip status updates.
func Opts(component string) controller.OptionsFn {
	return func(impl *controller.Impl) controller.Options {
		return controller.Options{
			AgentName:         component,
			SkipStatusUpdates: true,
		}
	}
}
diff --git a/pkg/sources/adapter/common/doc.go b/pkg/sources/adapter/common/doc.go new file mode 100644 index 00000000..8b25c1f7 --- /dev/null +++ b/pkg/sources/adapter/common/doc.go
// Package common contains various helpers for adapters.
package common
diff --git a/pkg/sources/adapter/common/env/env.go b/pkg/sources/adapter/common/env/env.go new file mode 100644 index 00000000..21b09e6e --- /dev/null +++ b/pkg/sources/adapter/common/env/env.go
// Package env allows propagating runtime configurations via the environment.
package env

import (
	"github.com/kelseyhightower/envconfig"
	"knative.dev/eventing/pkg/adapter/v2"
)

// ConfigAccessor is a superset of adapter.EnvConfigAccessor that overrides
// properties about certain variables.
type ConfigAccessor interface {
	adapter.EnvConfigAccessor
	// Get the component name.
	GetComponent() string
}

// Config is the minimal set of configuration parameters source adapters should support.
type Config struct {
	*adapter.EnvConfig
	// Environment variable containing the namespace of the adapter.
	Namespace string `envconfig:"NAMESPACE" required:"true"`
	// Component is the kind of this adapter.
	Component string `envconfig:"K_COMPONENT" required:"true"`
}

// Verify that Config implements ConfigAccessor.
var _ ConfigAccessor = (*Config)(nil)

// GetComponent implements ConfigAccessor.
func (c *Config) GetComponent() string {
	return c.Component
}

// ConfigConstructor is a callback function that returns a ConfigAccessor.
type ConfigConstructor func() ConfigAccessor

// MustProcessConfig populates the specified adapter.EnvConfigConstructor based
// on environment variables. It panics on missing required variables
// (envconfig.MustProcess semantics).
func MustProcessConfig(envCtor ConfigConstructor) ConfigAccessor {
	env := envCtor()
	envconfig.MustProcess("", env)
	return env
}
diff --git a/pkg/sources/adapter/common/health/health.go b/pkg/sources/adapter/common/health/health.go new file mode 100644 index 00000000..421d3811 --- /dev/null +++ b/pkg/sources/adapter/common/health/health.go
// Package health contains helpers to enable HTTP health checking.
package health

import (
	"context"
	"fmt"
	"net/http"
	"sync"
	"time"

	"go.uber.org/zap"

	"knative.dev/pkg/logging"
)

const healthPath = "/health"

// Use a var instead of a const to allow tests to override this value.
var healthPort uint16 = 8080

const gracefulHandlerShutdown = 3 * time.Second

// handler serves requests to the health endpoint. It returns a success HTTP
// code when its value is true.
type handler struct {
	sync.RWMutex
	ready bool
}

// Verify that handler implements http.Handler.
var _ http.Handler = (*handler)(nil)

// ServeHTTP implements http.Handler.
func (h *handler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
	if !h.isReady() {
		w.WriteHeader(http.StatusServiceUnavailable)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}

// isReady returns the readiness flag under a read lock.
func (h *handler) isReady() bool {
	h.RLock()
	defer h.RUnlock()

	return h.ready
}

var defaultHandler handler

// Start runs the default HTTP health handler.
+func Start(ctx context.Context) { + mux := &http.ServeMux{} + mux.Handle(healthPath, &defaultHandler) + + server := &http.Server{ + Addr: fmt.Sprintf(":%d", healthPort), + Handler: mux, + } + + errCh := make(chan error) + + go func() { + errCh <- server.ListenAndServe() + }() + + handleServerError := func(err error) { + if err != http.ErrServerClosed { + logging.FromContext(ctx).Errorw("Error during runtime of health server", zap.Error(err)) + } + } + + select { + case <-ctx.Done(): + ctx, cancel := context.WithTimeout(context.Background(), gracefulHandlerShutdown) + defer cancel() + + if err := server.Shutdown(ctx); err != nil { + logging.FromContext(ctx).Errorw("Error during shutdown of health server", zap.Error(err)) + } + + handleServerError(<-errCh) + + case err := <-errCh: + handleServerError(err) + } +} + +// MarkReady indicates that the application is ready to operate. +func MarkReady() { + if defaultHandler.isReady() { + return + } + + defaultHandler.Lock() + defer defaultHandler.Unlock() + + // double-checked lock to ensure we don't write the value of "ready" + // twice if multiple goroutines called MarkReady() simultaneously. + if defaultHandler.ready { + return + } + + defaultHandler.ready = true +} diff --git a/pkg/sources/adapter/common/router/router.go b/pkg/sources/adapter/common/router/router.go new file mode 100644 index 00000000..8998d022 --- /dev/null +++ b/pkg/sources/adapter/common/router/router.go @@ -0,0 +1,63 @@ +package router + +import ( + "html" + "net/http" + "sync" +) + +// Router routes incoming HTTP requests to the adequate handler based on their +// URL path. +type Router struct { + // map of URL path to HTTP handler + handlers sync.Map +} + +// Check that Router implements http.Handler. +var _ http.Handler = (*Router)(nil) + +// RegisterPath registers a HTTP handler for serving requests at the given URL path. 
func (r *Router) RegisterPath(urlPath string, h http.Handler) {
	r.handlers.Store(urlPath, h)
}

// DeregisterPath de-registers the HTTP handler for the given URL path.
func (r *Router) DeregisterPath(urlPath string) {
	r.handlers.Delete(urlPath)
}

// HandlersCount returns the number of handlers that are currently registered.
// Handlers whose path matches any of the given filters are excluded from the
// count.
func (r *Router) HandlersCount(filters ...handlerMatcherFunc) int {
	var count int

	r.handlers.Range(func(urlPath, _ interface{}) bool {
		for _, f := range filters {
			if f(urlPath.(string)) {
				// filtered out: skip this entry, keep iterating
				return true
			}
		}

		count++

		return true
	})

	return count
}

// handlerMatcherFunc is a matcher that allows ignoring some of the handlers
// inside HandlersCount() based on arbitrary predicates.
// The function should return 'true' if the given urlPath matches the
// predicate, in which case the handler is ignored.
type handlerMatcherFunc func(urlPath string) bool

// ServeHTTP implements http.Handler.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	h, ok := r.handlers.Load(req.URL.Path)
	if !ok {
		// escape the path to avoid reflected-XSS via the error message
		http.Error(w, "No handler for path "+html.EscapeString(req.URL.Path), http.StatusNotFound)
		return
	}

	h.(http.Handler).ServeHTTP(w, req)
}
diff --git a/pkg/sources/adapter/common/router/router_test.go b/pkg/sources/adapter/common/router/router_test.go new file mode 100644 index 00000000..55bc2f79 --- /dev/null +++ b/pkg/sources/adapter/common/router/router_test.go
package router

import (
	"net/http"
	"net/http/httptest"
	"sort"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const headerHandlerName = "HANDLER"

func TestRouter(t *testing.T) {
	r := &Router{}

	assert.Equal(t, 0, r.HandlersCount())

	// new router responds with status NotFound

	resp := recordResponse(t, r, "/")
	assert.Equal(t, http.StatusNotFound, resp.Code)

	// register 2 handlers

	r.RegisterPath("/foo", responder("foo"))
	r.RegisterPath("/bar", responder("bar"))

	assert.Equal(t, 2, r.HandlersCount())

	assert.Equal(t, []string{"/bar", "/foo"}, handlersKeys(r))

	resp = recordResponse(t, r, "/foo")
	assert.Equal(t, "foo", resp.Header().Get(headerHandlerName))

	resp = recordResponse(t, r, "/bar")
	assert.Equal(t, "bar", resp.Header().Get(headerHandlerName))

	// attempt to delete unregistered paths

	r.DeregisterPath("/")
	r.DeregisterPath("/baz")

	assert.Equal(t, 2, r.HandlersCount())

	assert.Equal(t, []string{"/bar", "/foo"}, handlersKeys(r))

	// delete a registered path

	r.DeregisterPath("/foo")

	assert.Equal(t, 1, r.HandlersCount())

	assert.Equal(t, []string{"/bar"}, handlersKeys(r))

	resp = recordResponse(t, r, "/foo")
	assert.Equal(t, http.StatusNotFound, resp.Code)
}

// responder returns a HTTP handler that responds to requests with a header
// containing the given handler's name.
func responder(name string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set(headerHandlerName, name)
		w.WriteHeader(http.StatusNoContent)
	})
}

// handlersKeys returns the keys of all the handlers currently registered in
// the given Router, sorted lexically.
func handlersKeys(r *Router) []string {
	var keys []string

	r.handlers.Range(func(key, _ interface{}) bool {
		keys = append(keys, key.(string))
		return true
	})

	sort.Strings(keys)

	return keys
}

// recordResponse sends a HTTP request to the provided handler at the given
// URL path and returns the recorded response.
func recordResponse(t *testing.T, h http.Handler, urlPath string) *httptest.ResponseRecorder {
	t.Helper()

	req, err := http.NewRequest(http.MethodHead, urlPath, nil)
	require.NoError(t, err)

	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)

	return rr
}
diff --git a/pkg/sources/adapter/common/sharedmain/sharedmain.go b/pkg/sources/adapter/common/sharedmain/sharedmain.go new file mode 100644 index 00000000..1ede998b --- /dev/null +++ b/pkg/sources/adapter/common/sharedmain/sharedmain.go
package sharedmain

import (
	"knative.dev/eventing/pkg/adapter/v2"
	"knative.dev/pkg/injection"
	"knative.dev/pkg/signals"

	"github.com/zeiss/typhoon/pkg/sources/adapter/common/env"
)

type (
	// constructors parameterized by the component name they serve.
	namedControllerConstructor func(component string) adapter.ControllerConstructor
	namedAdapterConstructor    func(component string) adapter.AdapterConstructor
)

// MainWithController is a shared main tailored to multi-tenant receive-adapters.
// It performs the following initializations:
//   - process environment variables
//   - enable leader election / HA
//   - set the scope to a single namespace
//   - inject the given controller constructor
func MainWithController(envCtor env.ConfigConstructor,
	cCtor namedControllerConstructor, aCtor namedAdapterConstructor,
) {
	envAcc := env.MustProcessConfig(envCtor)
	ns := envAcc.GetNamespace()
	component := envAcc.GetComponent()

	ctx := signals.NewContext()
	ctx = adapter.WithHAEnabled(ctx)
	ctx = injection.WithNamespaceScope(ctx, ns)
	ctx = adapter.WithController(ctx, cCtor(component))

	adapter.MainWithEnv(ctx, component, envAcc, aCtor(component))
}
diff --git a/pkg/sources/adapter/httppollersource/adapter.go b/pkg/sources/adapter/httppollersource/adapter.go new file mode 100644 index 00000000..9b93d2a0 --- /dev/null +++ b/pkg/sources/adapter/httppollersource/adapter.go
package httppollersource

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"net/http"

	import (
cloudevents "github.com/cloudevents/sdk-go/v2" + "go.uber.org/zap" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/sources" +) + +// NewAdapter satisfies pkgadapter.AdapterConstructor. +func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: sources.HTTPPollerSourceResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + env := envAcc.(*envAccessor) + + t := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: env.SkipVerify}, + } + + if env.CACertificate != "" { + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM([]byte(env.CACertificate)) { + logger.Panicf("Failed adding certificate to pool: %s", env.CACertificate) + } + + t.TLSClientConfig = &tls.Config{ + RootCAs: certPool, + } + } + + httpClient := &http.Client{Transport: t} + + httpRequest, err := http.NewRequest(env.Method, env.Endpoint, nil) + if err != nil { + logger.Panicw("Cannot build request", zap.Error(err)) + } + + for k, v := range env.Headers { + httpRequest.Header.Set(k, v) + } + + if env.BasicAuthUsername != "" || env.BasicAuthPassword != "" { + httpRequest.SetBasicAuth(env.BasicAuthUsername, env.BasicAuthPassword) + } + + return &httpPoller{ + eventType: env.EventType, + eventSource: env.EventSource, + interval: env.Interval, + + httpClient: httpClient, + httpRequest: httpRequest, + + ceClient: ceClient, + logger: logger, + mt: mt, + } +} diff --git a/pkg/sources/adapter/httppollersource/env.go b/pkg/sources/adapter/httppollersource/env.go new file mode 100644 index 00000000..c16cd63c --- /dev/null +++ b/pkg/sources/adapter/httppollersource/env.go @@ -0,0 +1,27 @@ +package httppollersource + +import ( + "time" + + "knative.dev/eventing/pkg/adapter/v2" +) + +// NewEnvConfig satisfies 
pkgadapter.EnvConfigConstructor. +func NewEnvConfig() adapter.EnvConfigAccessor { + return &envAccessor{} +} + +type envAccessor struct { + adapter.EnvConfig + + EventType string `envconfig:"HTTPPOLLER_EVENT_TYPE" required:"true"` + EventSource string `envconfig:"HTTPPOLLER_EVENT_SOURCE" required:"true"` + Endpoint string `envconfig:"HTTPPOLLER_ENDPOINT" required:"true"` + Method string `envconfig:"HTTPPOLLER_METHOD" required:"true"` + SkipVerify bool `envconfig:"HTTPPOLLER_SKIP_VERIFY"` + CACertificate string `envconfig:"HTTPPOLLER_CA_CERTIFICATE"` + BasicAuthUsername string `envconfig:"HTTPPOLLER_BASICAUTH_USERNAME"` + BasicAuthPassword string `envconfig:"HTTPPOLLER_BASICAUTH_PASSWORD"` + Headers map[string]string `envconfig:"HTTPPOLLER_HEADERS"` + Interval time.Duration `envconfig:"HTTPPOLLER_INTERVAL" required:"true"` +} diff --git a/pkg/sources/adapter/httppollersource/httppoller.go b/pkg/sources/adapter/httppollersource/httppoller.go new file mode 100644 index 00000000..452dc682 --- /dev/null +++ b/pkg/sources/adapter/httppollersource/httppoller.go @@ -0,0 +1,95 @@ +package httppollersource + +import ( + "context" + "io" + "net/http" + "time" + + "go.uber.org/zap" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" +) + +type httpPoller struct { + eventType string + eventSource string + interval time.Duration + + ceClient cloudevents.Client + + httpClient *http.Client + httpRequest *http.Request + logger *zap.SugaredLogger + mt *pkgadapter.MetricTag +} + +var _ pkgadapter.Adapter = (*httpPoller)(nil) + +// Start implements adapter.Adapter. +// Runs the server for receiving HTTP events until ctx gets cancelled. +func (h *httpPoller) Start(ctx context.Context) error { + h.logger.Info("Starting HTTP Poller source") + + ctx = pkgadapter.ContextWithMetricTag(ctx, h.mt) + + // initial request to avoid waiting for the first tick. + h.dispatch(ctx) + + // setup context for the request object. 
+ h.httpRequest = h.httpRequest.Clone(ctx) + + t := time.NewTicker(h.interval) + + for { + select { + + case <-ctx.Done(): + h.logger.Debug("Shutting down HTTP poller") + return nil + + case <-t.C: + h.dispatch(ctx) + } + } +} + +func (h *httpPoller) dispatch(ctx context.Context) { + h.logger.Debug("Launching HTTP request") + + res, err := h.httpClient.Do(h.httpRequest) + if err != nil { + h.logger.Errorw("Failed sending request", zap.Error(err)) + return + } + + defer res.Body.Close() + resb, err := io.ReadAll(res.Body) + if err != nil { + h.logger.Errorw("Failed reading response body", zap.Error(err)) + return + } + + if res.StatusCode >= 300 { + h.logger.Errorw("Received non supported HTTP code from remote endpoint", + zap.Int("code", res.StatusCode), + zap.String("response", string(resb)), + ) + return + } + + event := cloudevents.NewEvent(cloudevents.VersionV1) + event.SetType(h.eventType) + event.SetSource(h.eventSource) + + if err := event.SetData(cloudevents.ApplicationJSON, resb); err != nil { + h.logger.Errorw("Failed to set event data", zap.Error(err)) + return + } + + if result := h.ceClient.Send(ctx, event); !cloudevents.IsACK(result) { + h.logger.Errorw("Could not send Cloud Event", zap.Error(result)) + } +} diff --git a/pkg/sources/adapter/kafkasource/adapter.go b/pkg/sources/adapter/kafkasource/adapter.go new file mode 100644 index 00000000..3c556412 --- /dev/null +++ b/pkg/sources/adapter/kafkasource/adapter.go @@ -0,0 +1,192 @@ +package kafkasource + +import ( + "context" + "crypto/sha256" + "crypto/sha512" + "crypto/tls" + "crypto/x509" + "time" + + "go.uber.org/zap" + + "github.com/Shopify/sarama" + cloudevents "github.com/cloudevents/sdk-go/v2" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/sources" +) + +const ( + // Errors for the last 40 seconds will be taken into consideration. 
+ errorAccumulationTolerance = time.Second * 40 + // First 10 errors will be retried right away. + errorAccumulationSupportedCount = 10 + // Next 10 errors will be delayed, after a total of + // 20 errors have been accumulated for the tolerance perior, the + // adapter will exit. + errorAccumulationDelayedCount = 20 + // Delayed consumer retries will wait this duration. + errorAccumulationDelay = time.Second +) + +var _ pkgadapter.Adapter = (*kafkasourceAdapter)(nil) + +type kafkasourceAdapter struct { + ceClient cloudevents.Client + logger *zap.SugaredLogger + mt *pkgadapter.MetricTag + + kafkaClient sarama.ConsumerGroup + topic string +} + +// NewAdapter satisfies pkgadapter.AdapterConstructor. +func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + sarama.Logger = zap.NewStdLog(logger.Named("sarama").Desugar()) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: sources.CloudEventsSourceResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + env := envAcc.(*envAccessor) + + var err error + + config := sarama.NewConfig() + + if env.SASLEnable { + mechanism := sarama.SASLMechanism(env.SecurityMechanisms) + + // If the SASL SCRAM mechanism a SCRAM generator must be provided pointing + // to a corresponding hash generator function. 
		switch mechanism {
		case sarama.SASLTypeSCRAMSHA256:
			config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: sha256.New} }
		case sarama.SASLTypeSCRAMSHA512:
			config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: sha512.New} }
		}

		config.Net.SASL.Enable = env.SASLEnable
		config.Net.SASL.Mechanism = mechanism
		config.Net.SASL.User = env.Username
		config.Net.SASL.Password = env.Password
	}

	if env.TLSEnable {
		config.Net.TLS.Enable = env.TLSEnable

		tlsCfg := &tls.Config{}
		if env.CA != "" {
			addCAConfig(tlsCfg, env.CA)
		}

		// mutual TLS: load the client certificate/key pair when provided.
		if env.ClientCert != "" || env.ClientKey != "" {
			if err := addTLSCerts(tlsCfg, env.ClientCert, env.ClientKey); err != nil {
				logger.Panicw("Could not parse the TLS Certificates", zap.Error(err))
			}
		}

		config.Net.TLS.Config = tlsCfg
		config.Net.TLS.Config.InsecureSkipVerify = env.SkipVerify
	}

	// Kerberos (GSSAPI) authentication: keytab auth when a keytab path is
	// provided, user/password auth otherwise.
	if env.SecurityMechanisms == "GSSAPI" {
		kerberosConfig := sarama.GSSAPIConfig{
			KerberosConfigPath: env.KerberosConfigPath,
			ServiceName:        env.KerberosServiceName,
			Username:           env.KerberosUsername,
			Password:           env.KerberosPassword,
			Realm:              env.KerberosRealm,
			DisablePAFXFAST:    true,
		}
		if env.KerberosKeytabPath != "" {
			kerberosConfig.AuthType = sarama.KRB5_KEYTAB_AUTH
			kerberosConfig.KeyTabPath = env.KerberosKeytabPath
		} else {
			kerberosConfig.AuthType = sarama.KRB5_USER_AUTH
		}

		config.Net.SASL.GSSAPI = kerberosConfig
	}

	err = config.Validate()
	if err != nil {
		logger.Panicw("Config not valid", zap.Error(err))
	}

	kc, err := sarama.NewConsumerGroup(
		env.BootstrapServers,
		env.GroupID, config)
	if err != nil {
		logger.Panicw("Error creating Kafka Consumer Group", zap.Error(err))
	}

	return &kafkasourceAdapter{
		kafkaClient: kc,
		topic:       env.Topic,

		ceClient: ceClient,
		logger:   logger,
		mt:       mt,
	}
}

// Start implements adapter.Adapter. It consumes from the configured topic
// until ctx is done, tolerating a bounded burst of consumer errors.
func (a *kafkasourceAdapter) Start(ctx context.Context) error {
	a.logger.Info("Starting Kafka Source Adapter")

	consumerGroup := consumerGroupHandler{
		adapter: a,
	}

	errorList := NewStaleList(errorAccumulationTolerance)

	// while the context is not done, run the loop.
	for ctx.Err() == nil {
		// `Consume` should be called inside an infinite loop, when a
		// server-side rebalance happens, the consumer session will need to be
		// recreated to get the new claims
		if err := a.kafkaClient.Consume(ctx, []string{a.topic}, consumerGroup); err != nil {
			a.logger.Error("Error setting up the consumer client", zap.Error(err))

			// Safety net mechanism, we try to re-consume and avoid exiting the adapter.
			// This is mainly due to the adapter not being used at environments where
			// a restart can be configured (?)
			errNum := errorList.AddAndCount(err)
			switch {
			case errNum < errorAccumulationSupportedCount:
				// If errors are occasional let it retry to consume fast.
				continue
			case errNum < errorAccumulationDelayedCount:
				// If errors pile up, we add pauses between retries
				a.logger.Info("Slowing down consumer connection loop, too many errors")
				time.Sleep(errorAccumulationDelay)
			default:
				a.logger.Info("Giving up on consumer connection retries, too many errors")
				return err
			}
		}
	}

	return nil
}

// addCAConfig installs the given PEM-encoded CA certificate as the root CA
// pool of the TLS configuration.
func addCAConfig(tlsConfig *tls.Config, caCert string) {
	caCertPool := x509.NewCertPool()
	caCertPool.AppendCertsFromPEM([]byte(caCert))
	tlsConfig.RootCAs = caCertPool
}

// addTLSCerts parses the PEM client certificate/key pair and attaches it to
// the TLS configuration for mutual TLS.
func addTLSCerts(tlsConfig *tls.Config, clientCert, clientKey string) error {
	cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
	if err == nil {
		tlsConfig.Certificates = []tls.Certificate{cert}
	}

	return err
}
diff --git a/pkg/sources/adapter/kafkasource/env.go b/pkg/sources/adapter/kafkasource/env.go new file mode 100644 index 00000000..692ca893 --- /dev/null +++ b/pkg/sources/adapter/kafkasource/env.go
package kafkasource

import (
	"knative.dev/eventing/pkg/adapter/v2"
)
+ +// NewEnvConfig satisfies pkgadapter.EnvConfigConstructor. +func NewEnvConfig() adapter.EnvConfigAccessor { + return &envAccessor{} +} + +type envAccessor struct { + adapter.EnvConfig + + SASLEnable bool `envconfig:"SASL_ENABLE" required:"false"` + TLSEnable bool `envconfig:"TLS_ENABLE" required:"false"` + + BootstrapServers []string `envconfig:"BOOTSTRAP_SERVERS" required:"true"` + Username string `envconfig:"USERNAME" required:"false"` + Password string `envconfig:"PASSWORD" required:"false"` + Topic string `envconfig:"TOPIC" required:"true"` + GroupID string `envconfig:"GROUP_ID" required:"false"` + + SecurityMechanisms string `envconfig:"SECURITY_MECHANISMS" required:"false"` + KerberosConfigPath string `envconfig:"KERBEROS_CONFIG_PATH" required:"false" ` + KerberosServiceName string `envconfig:"KERBEROS_SERVICE_NAME" required:"false" ` + KerberosKeytabPath string `envconfig:"KERBEROS_KEYTAB_PATH" required:"false"` + KerberosRealm string `envconfig:"KERBEROS_REALM" required:"false"` + KerberosUsername string `envconfig:"KERBEROS_USERNAME" required:"false"` + KerberosPassword string `envconfig:"KERBEROS_PASSWORD" required:"false"` + + CA string `envconfig:"CA" required:"false"` + ClientCert string `envconfig:"CLIENT_CERT" required:"false"` + ClientKey string `envconfig:"CLIENT_KEY" required:"false"` + SkipVerify bool `envconfig:"SKIP_VERIFY" required:"false"` +} diff --git a/pkg/sources/adapter/kafkasource/kafka.go b/pkg/sources/adapter/kafkasource/kafka.go new file mode 100644 index 00000000..cd38b6ea --- /dev/null +++ b/pkg/sources/adapter/kafkasource/kafka.go @@ -0,0 +1,65 @@ +package kafkasource + +import ( + "context" + "fmt" + + "github.com/Shopify/sarama" + cloudevents "github.com/cloudevents/sdk-go/v2" + "go.uber.org/zap" +) + +const ( + eventType = "com.zeiss.kafka.event" +) + +type consumerGroupHandler struct { + adapter *kafkasourceAdapter +} + +func (a *kafkasourceAdapter) emitEvent(ctx context.Context, msg sarama.ConsumerMessage) error { + event 
:= cloudevents.NewEvent(cloudevents.VersionV1) + event.SetType(eventType) + event.SetSubject("kafka/event") + event.SetSource(msg.Topic) + event.SetID(string(msg.Key)) + + if err := event.SetData(cloudevents.ApplicationJSON, msg.Value); err != nil { + return fmt.Errorf("failed to set event data: %w", err) + } + + if result := a.ceClient.Send(context.Background(), event); !cloudevents.IsACK(result) { + return result + } + return nil +} + +// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). +func (c consumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + for { + select { + case msg, ok := <-claim.Messages(): + if !ok { + return nil + } + if err := c.adapter.emitEvent(session.Context(), *msg); err != nil { + c.adapter.logger.Errorw("Failed to emit event: %v", zap.Error(err)) + // do not mark message + continue + } + session.MarkMessage(msg, "") + + case <-session.Context().Done(): + c.adapter.logger.Infow("Context closed, exiting consumer") + return nil + } + } +} + +func (c consumerGroupHandler) Setup(sarama.ConsumerGroupSession) error { + return nil +} + +func (c consumerGroupHandler) Cleanup(sarama.ConsumerGroupSession) error { + return nil +} diff --git a/pkg/sources/adapter/kafkasource/scram_client.go b/pkg/sources/adapter/kafkasource/scram_client.go new file mode 100644 index 00000000..7d266f73 --- /dev/null +++ b/pkg/sources/adapter/kafkasource/scram_client.go @@ -0,0 +1,29 @@ +package kafkasource + +import ( + "github.com/xdg-go/scram" +) + +type XDGSCRAMClient struct { + *scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +func (x *XDGSCRAMClient) Step(challenge string) (response string, err 
error) { + response, err = x.ClientConversation.Step(challenge) + return +} + +func (x *XDGSCRAMClient) Done() bool { + return x.ClientConversation.Done() +} diff --git a/pkg/sources/adapter/kafkasource/stale.go b/pkg/sources/adapter/kafkasource/stale.go new file mode 100644 index 00000000..66bac47f --- /dev/null +++ b/pkg/sources/adapter/kafkasource/stale.go @@ -0,0 +1,67 @@ +package kafkasource + +import ( + "sync" + "time" +) + +type item struct { + object interface{} + added time.Time +} + +// StaleList is a list of items that timeout lazily, +// only checking for item expiration when a new one is added. +// +// This should not be used for storing a big number of items. +type StaleList struct { + items []item + timeout time.Duration + m sync.Mutex +} + +func NewStaleList(timeout time.Duration) *StaleList { + return &StaleList{ + items: []item{}, + timeout: timeout, + } +} + +func (sl *StaleList) count() int { + index := -1 + for i := range sl.items { + if time.Since(sl.items[i].added) > sl.timeout { + index = i + continue + } + break + } + + if index != -1 { + sl.items = sl.items[index+1:] + } + + return len(sl.items) +} + +// AddAndCount adds a new element to the list and updates the count, removing +// any stale items from it. +func (sl *StaleList) AddAndCount(object interface{}) int { + sl.m.Lock() + defer sl.m.Unlock() + + sl.items = append(sl.items, item{ + added: time.Now(), + object: object, + }) + + return sl.count() +} + +// Count updates the count removing any stale items from it. 
+func (sl *StaleList) Count() int { + sl.m.Lock() + defer sl.m.Unlock() + + return sl.count() +} diff --git a/pkg/sources/adapter/webhooksource/adapter.go b/pkg/sources/adapter/webhooksource/adapter.go new file mode 100644 index 00000000..d56664de --- /dev/null +++ b/pkg/sources/adapter/webhooksource/adapter.go @@ -0,0 +1,38 @@ +package webhooksource + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/sources" +) + +// NewAdapter satisfies pkgadapter.AdapterConstructor. +func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + mt := &pkgadapter.MetricTag{ + ResourceGroup: sources.WebhookSourceResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + env := envAcc.(*envAccessor) + + return &webhookHandler{ + eventType: env.EventType, + eventSource: env.EventSource, + extensionAttributesFrom: env.EventExtensionAttributesFrom, + username: env.BasicAuthUsername, + password: env.BasicAuthPassword, + corsAllowOrigin: env.CORSAllowOrigin, + + ceClient: ceClient, + logger: logging.FromContext(ctx), + mt: mt, + } +} + +var _ pkgadapter.Adapter = (*webhookHandler)(nil) diff --git a/pkg/sources/adapter/webhooksource/env.go b/pkg/sources/adapter/webhooksource/env.go new file mode 100644 index 00000000..7763293f --- /dev/null +++ b/pkg/sources/adapter/webhooksource/env.go @@ -0,0 +1,53 @@ +package webhooksource + +import ( + "fmt" + "strings" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" +) + +// NewEnvConfig satisfies pkgadapter.EnvConfigConstructor. 
+func NewEnvConfig() pkgadapter.EnvConfigAccessor { + return &envAccessor{} +} + +type envAccessor struct { + pkgadapter.EnvConfig + + EventType string `envconfig:"WEBHOOK_EVENT_TYPE" required:"true"` + EventSource string `envconfig:"WEBHOOK_EVENT_SOURCE" required:"true"` + EventExtensionAttributesFrom *ExtensionAttributesFrom `envconfig:"WEBHOOK_EVENT_EXTENSION_ATTRIBUTES_FROM"` + BasicAuthUsername string `envconfig:"WEBHOOK_BASICAUTH_USERNAME"` + BasicAuthPassword string `envconfig:"WEBHOOK_BASICAUTH_PASSWORD"` + CORSAllowOrigin string `envconfig:"WEBHOOK_CORS_ALLOW_ORIGIN"` +} + +type ExtensionAttributesFrom struct { + method bool + path bool + host bool + queries bool + headers bool +} + +// Decode an array of KeyMountedValues +func (ea *ExtensionAttributesFrom) Decode(value string) error { + for _, o := range strings.Split(value, ",") { + switch o { + case "method": + ea.method = true + case "path": + ea.path = true + case "host": + ea.host = true + case "queries": + ea.queries = true + case "headers": + ea.headers = true + default: + return fmt.Errorf("CloudEvent extension from HTTP element not supported: %s", o) + } + } + return nil +} diff --git a/pkg/sources/adapter/webhooksource/webhook.go b/pkg/sources/adapter/webhooksource/webhook.go new file mode 100644 index 00000000..9176133c --- /dev/null +++ b/pkg/sources/adapter/webhooksource/webhook.go @@ -0,0 +1,243 @@ +package webhooksource + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" + + "go.uber.org/zap" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" +) + +const ( + serverPort uint16 = 8080 + serverShutdownGracePeriod = time.Second * 10 + queryPrefix = "q" + headerPrefix = "h" +) + +type webhookHandler struct { + eventType string + eventSource string + extensionAttributesFrom *ExtensionAttributesFrom + username string + password string + corsAllowOrigin string + + ceClient 
cloudevents.Client + logger *zap.SugaredLogger + mt *pkgadapter.MetricTag +} + +// Start implements pkgadapter.Adapter +// Runs the server for receiving HTTP events until ctx gets cancelled. +func (h *webhookHandler) Start(ctx context.Context) error { + ctx = pkgadapter.ContextWithMetricTag(ctx, h.mt) + + m := http.NewServeMux() + m.HandleFunc("/", h.handleAll(ctx)) + m.HandleFunc("/health", healthCheckHandler) + + s := &http.Server{ + Addr: fmt.Sprintf(":%d", serverPort), + Handler: m, + } + + return runHandler(ctx, s) +} + +// runHandler runs the HTTP event handler until ctx gets cancelled. +func runHandler(ctx context.Context, s *http.Server) error { + logging.FromContext(ctx).Info("Starting webhook event handler") + + errCh := make(chan error) + go func() { + errCh <- s.ListenAndServe() + }() + + handleServerError := func(err error) error { + if err != http.ErrServerClosed { + return fmt.Errorf("during server runtime: %w", err) + } + return nil + } + + select { + case <-ctx.Done(): + logging.FromContext(ctx).Info("HTTP event handler is shutting down") + + ctx, cancel := context.WithTimeout(context.Background(), serverShutdownGracePeriod) + defer cancel() + + if err := s.Shutdown(ctx); err != nil { + return fmt.Errorf("during server shutdown: %w", err) + } + + return handleServerError(<-errCh) + + case err := <-errCh: + return handleServerError(err) + } +} + +// handleAll receives all webhook events at a single resource, it +// is up to this function to parse event wrapper and dispatch. 
+func (h *webhookHandler) handleAll(ctx context.Context) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if h.corsAllowOrigin != "" { + w.Header().Set("Access-Control-Allow-Origin", h.corsAllowOrigin) + } + + if r.Body == nil { + h.handleError(errors.New("request without body not supported"), http.StatusBadRequest, w) + return + } + + if h.username != "" && h.password != "" { + us, ps, ok := r.BasicAuth() + if !ok { + h.handleError(errors.New("wrong authentication header"), http.StatusBadRequest, w) + return + } + if us != h.username || ps != h.password { + h.handleError(errors.New("credentials are not valid"), http.StatusUnauthorized, w) + return + } + } + + defer r.Body.Close() + body, err := io.ReadAll(r.Body) + if err != nil { + h.handleError(err, http.StatusInternalServerError, w) + return + } + + event := cloudevents.NewEvent(cloudevents.VersionV1) + event.SetType(h.eventType) + event.SetSource(h.eventSource) + + // Add extension attributes if configured + if h.extensionAttributesFrom != nil { + if h.extensionAttributesFrom.path { + event.SetExtension("path", r.URL.Path) + } + if h.extensionAttributesFrom.method { + event.SetExtension("method", r.Method) + } + if h.extensionAttributesFrom.host { + event.SetExtension("host", r.Host) + } + if h.extensionAttributesFrom.queries { + for k, v := range r.URL.Query() { + if len(v) == 1 { + event.SetExtension(sanitizeCloudEventAttributeName(queryPrefix+k), v[0]) + } else { + for i := range v { + event.SetExtension(sanitizeCloudEventAttributeName( + fmt.Sprintf("%s%s%d", queryPrefix, k, i)), v[i]) + } + } + } + } + if h.extensionAttributesFrom.headers { + for k, v := range r.Header { + // Prevent Authorization header from being added + // as a CloudEvent attribute + if k == "Authorization" { + continue + } + if k == "Ce-Id" { + if len(v) != 0 { + event.SetID(v[0]) + } + continue + } + if k == "Ce-Subject" { + if len(v) != 0 { + event.SetSubject(v[0]) + } + continue + } + + if len(v) == 1 { 
+ event.SetExtension(sanitizeCloudEventAttributeName(headerPrefix+k), v[0]) + } else { + for i := range v { + event.SetExtension(sanitizeCloudEventAttributeName( + fmt.Sprintf("%s%s%d", headerPrefix, k, i)), v[i]) + } + } + } + } + } + + if err := event.SetData(r.Header.Get("Content-Type"), body); err != nil { + h.handleError(fmt.Errorf("failed to set event data: %w", err), http.StatusInternalServerError, w) + return + } + + rEvent, result := h.ceClient.Request(ctx, event) + if !cloudevents.IsACK(result) { + h.handleError(fmt.Errorf("could not send Cloud Event: %w", result), http.StatusInternalServerError, w) + return + } + if rEvent == nil || rEvent.Data() == nil { + w.WriteHeader(http.StatusNoContent) + return + } + w.WriteHeader(http.StatusOK) + } +} + +func (h *webhookHandler) handleError(err error, code int, w http.ResponseWriter) { + h.logger.Errorw("An error ocurred", zap.Error(err)) + http.Error(w, err.Error(), code) +} + +func healthCheckHandler(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusOK) +} + +func sanitizeCloudEventAttributeName(name string) string { + // only lowercase accepted + name = strings.ToLower(name) + + // strip non valid characters + needsStripping := false + for i := range name { + if !((name[i] >= 'a' && name[i] <= 'z') || (name[i] >= '0' && name[i] <= '9')) { + needsStripping = true + break + } + } + + if needsStripping { + stripped := []byte{} + for i := range name { + if (name[i] >= 'a' && name[i] <= 'z') || (name[i] >= '0' && name[i] <= '9') { + stripped = append(stripped, name[i]) + } + } + name = string(stripped) + } + + // truncate if longer than 20 characters + if len(name) > 20 { + name = name[:20] + } + + // data is a reserved element at CloudEvents + if name == "data" || name == "path" || name == "method" || name == "host" { + return "data0" + } + return name +} diff --git a/pkg/sources/auth/errors.go b/pkg/sources/auth/errors.go new file 
mode 100644 index 00000000..a40bb49a --- /dev/null +++ b/pkg/sources/auth/errors.go @@ -0,0 +1,50 @@ +package auth + +// PermanentCredentialsError is an error behaviour which signals that the +// interaction with an external service shouldn't be retried, due to +// credentials which are either invalid, expired, or missing permissions. +// +// This allows callers to handle that special case if required, especially when +// the original error can not be asserted any other way because it is untyped. +// For example, Kubernetes finalizers are unlikely to be able to proceed when +// credentials can not be determined. +// +// Examples of assertion: +// +// _, ok := err.(PermanentCredentialsError) +// +// permErr := (PermanentCredentialsError)(nil) +// ok := errors.As(err, &permErr) +type PermanentCredentialsError interface { + error + IsPermanent() +} + +// NewPermanentCredentialsError marks an auth-related error as permanent (non retryable). +func NewPermanentCredentialsError(err error) error { + return permanentCredentialsError{e: err} +} + +var _ PermanentCredentialsError = (*permanentCredentialsError)(nil) + +// permanentCredentialsError is an opaque error type that wraps another error +// and implements the PermanentCredentialsError error behaviour. +type permanentCredentialsError struct { + e error +} + +// IsFatal implements FatalCredentialsError. +func (permanentCredentialsError) IsPermanent() {} + +// Error implements the error interface. +func (e permanentCredentialsError) Error() string { + if e.e == nil { + return "" + } + return e.e.Error() +} + +// Unwrap implements errors.Unwrap. 
+func (e permanentCredentialsError) Unwrap() error { + return e.e +} diff --git a/pkg/sources/cloudevents/overrides.go b/pkg/sources/cloudevents/overrides.go new file mode 100644 index 00000000..3cb1bf22 --- /dev/null +++ b/pkg/sources/cloudevents/overrides.go @@ -0,0 +1,46 @@ +package cloudevents + +import ( + "encoding/json" + + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +// OverridesJSON returns the JSON representation of a duckv1.CloudEventOverrides, +// after applying some optional transformations to it. +func OverridesJSON(ceo *duckv1.CloudEventOverrides, overrides ...ceOverrideOption) string { + for _, o := range overrides { + ceo = o(ceo) + } + + var ceoStr string + if b, err := json.Marshal(ceo); err == nil { + ceoStr = string(b) + } + + return ceoStr +} + +// ceOverrideOption is a functional option that can alter a duckv1.CloudEventOverrides. +type ceOverrideOption func(*duckv1.CloudEventOverrides) *duckv1.CloudEventOverrides + +// SetExtension returns a ceOverrideOption which sets a given CloudEvents +// extension to an arbitrary value, if this extension isn't already set. 
+func SetExtension(key, value string) ceOverrideOption { + return func(ceo *duckv1.CloudEventOverrides) *duckv1.CloudEventOverrides { + if ceo == nil { + ceo = &duckv1.CloudEventOverrides{} + } + + ext := &ceo.Extensions + if *ext == nil { + *ext = make(map[string]string, 1) + } + + if _, isSet := (*ext)[key]; !isSet { + (*ext)[key] = value + } + + return ceo + } +} diff --git a/pkg/sources/flow/adapter/jqtransformation/adapter.go b/pkg/sources/flow/adapter/jqtransformation/adapter.go new file mode 100644 index 00000000..92957201 --- /dev/null +++ b/pkg/sources/flow/adapter/jqtransformation/adapter.go @@ -0,0 +1,118 @@ +package jqtransformation + +import ( + "context" + "encoding/json" + + "github.com/itchyny/gojq" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "go.uber.org/zap" + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/flow" + "github.com/zeiss/typhoon/pkg/metrics" + targetce "github.com/zeiss/typhoon/pkg/targets/adapter/cloudevents" +) + +// NewAdapter adapter implementation +func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: flow.JQTransformationResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + metrics.MustRegisterEventProcessingStatsView() + + env := envAcc.(*envAccessor) + + replier, err := targetce.New(env.Component, logger.Named("replier"), + targetce.ReplierWithStatefulHeaders(env.BridgeIdentifier), + targetce.ReplierWithStaticResponseType("com.zeiss.jqtransformation.error"), + targetce.ReplierWithPayloadPolicy(targetce.PayloadPolicy(env.CloudEventPayloadPolicy))) + if err != nil { + logger.Panicf("Error creating CloudEvents replier: %v", err) + } + + query, err := gojq.Parse(env.Query) + if err != nil { + logger.Panicf("Error creating query: %v", err) + } + + return &jqadapter{ + 
query: query, + + sink: env.Sink, + replier: replier, + ceClient: ceClient, + logger: logger, + + mt: mt, + sr: metrics.MustNewEventProcessingStatsReporter(mt), + } +} + +var _ pkgadapter.Adapter = (*jqadapter)(nil) + +type jqadapter struct { + query *gojq.Query + + sink string + replier *targetce.Replier + ceClient cloudevents.Client + logger *zap.SugaredLogger + + mt *pkgadapter.MetricTag + sr *metrics.EventProcessingStatsReporter +} + +// Start is a blocking function and will return if an error occurs +// or the context is cancelled. +func (a *jqadapter) Start(ctx context.Context) error { + a.logger.Info("Starting JQTransformation Adapter") + ctx = pkgadapter.ContextWithMetricTag(ctx, a.mt) + return a.ceClient.StartReceiver(ctx, a.dispatch) +} + +func (a *jqadapter) dispatch(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + var data interface{} + var qd interface{} + if err := event.DataAs(&data); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + + iter := a.query.Run(data) + for { + v, ok := iter.Next() + if !ok { + break + } + if err, ok := v.(error); ok { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + qd = v + } + + // Reserialize the query results for the response + bs, err := json.Marshal(&qd) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + + if err := event.SetData(cloudevents.ApplicationJSON, bs); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + if a.sink != "" { + if result := a.ceClient.Send(ctx, event); !cloudevents.IsACK(result) { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, result, "sending the cloudevent to the sink") + } + return nil, cloudevents.ResultACK + } + + return &event, cloudevents.ResultACK +} diff --git a/pkg/sources/flow/adapter/jqtransformation/config.go 
b/pkg/sources/flow/adapter/jqtransformation/config.go new file mode 100644 index 00000000..3960a24a --- /dev/null +++ b/pkg/sources/flow/adapter/jqtransformation/config.go @@ -0,0 +1,21 @@ +package jqtransformation + +import pkgadapter "knative.dev/eventing/pkg/adapter/v2" + +// EnvAccessorCtor for configuration parameters +func EnvAccessorCtor() pkgadapter.EnvConfigAccessor { + return &envAccessor{} +} + +type envAccessor struct { + pkgadapter.EnvConfig + // Query represents the jq query to be applied to the incoming event + Query string `envconfig:"JQ_QUERY" required:"true"` + // BridgeIdentifier is the name of the bridge workflow this target is part of + BridgeIdentifier string `envconfig:"EVENTS_BRIDGE_IDENTIFIER"` + // CloudEvents responses parametrization + CloudEventPayloadPolicy string `envconfig:"EVENTS_PAYLOAD_POLICY" default:"error"` + // Sink defines the target sink for the events. If no Sink is defined the + // events are replied back to the sender. + Sink string `envconfig:"K_SINK"` +} diff --git a/pkg/sources/flow/adapter/synchronizer/adapter.go b/pkg/sources/flow/adapter/synchronizer/adapter.go new file mode 100644 index 00000000..1f7916a3 --- /dev/null +++ b/pkg/sources/flow/adapter/synchronizer/adapter.go @@ -0,0 +1,160 @@ +package synchronizer + +import ( + "context" + "fmt" + "net/http" + "time" + + "go.uber.org/zap" + + cloudevents "github.com/cloudevents/sdk-go/v2" + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/flow" + "github.com/zeiss/typhoon/pkg/metrics" + targetce "github.com/zeiss/typhoon/pkg/targets/adapter/cloudevents" +) + +var _ pkgadapter.Adapter = (*adapter)(nil) + +type adapter struct { + ceClient cloudevents.Client + logger *zap.SugaredLogger + + mt *pkgadapter.MetricTag + sr *metrics.EventProcessingStatsReporter + + correlationKey *correlationKey + responseTimeout time.Duration + + sessions *storage + sinkURL string + bridgeID string +} + +// NewAdapter 
returns adapter implementation. +func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: flow.SynchronizerResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + metrics.MustRegisterEventProcessingStatsView() + + env := envAcc.(*envAccessor) + + key, err := newCorrelationKey(env.CorrelationKey, env.CorrelationKeyLength) + if err != nil { + logger.Panic("Cannot create an instance of Correlation Key: %v", err) + } + + return &adapter{ + ceClient: ceClient, + logger: logger, + + mt: mt, + sr: metrics.MustNewEventProcessingStatsReporter(mt), + + correlationKey: key, + responseTimeout: env.ResponseWaitTimeout, + + sessions: newStorage(), + sinkURL: env.Sink, + bridgeID: env.BridgeIdentifier, + } +} + +// Returns if stopCh is closed or Send() returns an error. +func (a *adapter) Start(ctx context.Context) error { + a.logger.Info("Starting Synchronizer Adapter") + ctx = pkgadapter.ContextWithMetricTag(ctx, a.mt) + return a.ceClient.StartReceiver(ctx, a.dispatch) +} + +func (a *adapter) dispatch(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + a.logger.Debugf("Received the event: %s", event.String()) + + if correlationID, exists := a.correlationKey.get(event); exists { + return a.serveResponse(ctx, correlationID, event) + } + + correlationID := a.correlationKey.set(&event) + return a.serveRequest(ctx, correlationID, event) +} + +// serveRequest creates the session for the incoming events and blocks the client. 
+func (a *adapter) serveRequest(ctx context.Context, correlationID string, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + a.logger.Debugf("Handling request %q", correlationID) + + respChan, err := a.sessions.add(correlationID) + if err != nil { + return nil, cloudevents.NewHTTPResult(http.StatusInternalServerError, "cannot add session %q: %w", correlationID, err) + } + defer a.sessions.delete(correlationID) + + sendErr := make(chan error) + defer close(sendErr) + + go func() { + if res := a.ceClient.Send(cloudevents.ContextWithTarget(ctx, a.sinkURL), a.withBridgeIdentifier(&event)); cloudevents.IsUndelivered(res) { + sendErr <- res + } + }() + + a.logger.Debugf("Waiting response for %q", correlationID) + + select { + case err := <-sendErr: + a.logger.Errorw("Unable to forward the request", zap.Error(err)) + return nil, cloudevents.NewHTTPResult(http.StatusBadRequest, "unable to forward the request: %v", err) + case result := <-respChan: + if result == nil { + a.logger.Errorw("No response", zap.Error(fmt.Errorf("response channel with ID %q is closed", correlationID))) + return nil, cloudevents.NewHTTPResult(http.StatusInternalServerError, "failed to communicate the response") + } + a.logger.Debugf("Received response for %q", correlationID) + res := a.withBridgeIdentifier(result) + return &res, cloudevents.ResultACK + case <-time.After(a.responseTimeout): + a.logger.Errorw("Request time out", zap.Error(fmt.Errorf("request %q did not receive backend response in time", correlationID))) + return nil, cloudevents.NewHTTPResult(http.StatusGatewayTimeout, "backend did not respond in time") + } +} + +// serveResponse matches event's correlation key and writes response back to the session's communication channel. 
+func (a *adapter) serveResponse(ctx context.Context, correlationID string, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + a.logger.Debugf("Handling response %q", correlationID) + + responseChan, exists := a.sessions.get(correlationID) + if !exists { + a.logger.Errorw("Session not found", zap.Error(fmt.Errorf("client session with ID %q does not exist", correlationID))) + return nil, cloudevents.NewHTTPResult(http.StatusBadGateway, "client session does not exist") + } + + a.logger.Debugf("Forwarding response %q", correlationID) + select { + case responseChan <- &event: + a.logger.Debugf("Response %q completed", correlationID) + return nil, cloudevents.ResultACK + default: + a.logger.Errorw("Unable to forward the response", zap.Error(fmt.Errorf("client connection with ID %q is closed", correlationID))) + return nil, cloudevents.NewHTTPResult(http.StatusBadGateway, "client connection is closed") + } +} + +// withBridgeIdentifier adds Bridge ID to the event context. +func (a *adapter) withBridgeIdentifier(event *cloudevents.Event) cloudevents.Event { + if a.bridgeID == "" { + return *event + } + if bid, err := event.Context.GetExtension(targetce.StatefulWorkflowHeader); err != nil && bid != "" { + return *event + } + event.SetExtension(targetce.StatefulWorkflowHeader, a.bridgeID) + return *event +} diff --git a/pkg/sources/flow/adapter/synchronizer/correlation.go b/pkg/sources/flow/adapter/synchronizer/correlation.go new file mode 100644 index 00000000..74e635fc --- /dev/null +++ b/pkg/sources/flow/adapter/synchronizer/correlation.go @@ -0,0 +1,75 @@ +package synchronizer + +import ( + "fmt" + "math/rand" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2/event" +) + +// Correlation Key charset. +const correlationKeycharset = "abcdefghijklmnopqrstuvwxyz0123456789" + +var ( + // CloudEvent attributes cannot be used as a correltaion key. 
+ restrictedKeys = []string{ + "id", + "type", + "time", + "subject", + "schemaurl", + "dataschema", + "specversion", + "datamediatype", + "datacontenttype", + "datacontentencoding", + } + + seededRand *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano())) +) + +// correlationKey is the correlation attribute for the CloudEvents. +type correlationKey struct { + attribute string + length int +} + +// NewCorrelationKey returns an instance of the CloudEvent Correlation key. +func newCorrelationKey(attribute string, length int) (*correlationKey, error) { + for _, rk := range restrictedKeys { + if attribute == rk { + return nil, fmt.Errorf("%q cannot be used as a correlation key", attribute) + } + } + + return &correlationKey{ + attribute: attribute, + length: length, + }, nil +} + +// Get returns the value of Correlation Key. +func (k *correlationKey) get(event cloudevents.Event) (string, bool) { + if val, exists := event.Extensions()[k.attribute]; exists { + return val.(string), true + } + return "", false +} + +// Set updates the CloudEvent's context with the random Correlation Key value. +func (k *correlationKey) set(event *cloudevents.Event) string { + correlationID := randString(k.length) + event.SetExtension(k.attribute, correlationID) + return correlationID +} + +// randString generates the random string with fixed length. 
+func randString(length int) string { + k := make([]byte, length) + l := len(correlationKeycharset) - 1 + for i := range k { + k[i] = correlationKeycharset[seededRand.Intn(l)] + } + return string(k) +} diff --git a/pkg/sources/flow/adapter/synchronizer/env.go b/pkg/sources/flow/adapter/synchronizer/env.go new file mode 100644 index 00000000..94f91820 --- /dev/null +++ b/pkg/sources/flow/adapter/synchronizer/env.go @@ -0,0 +1,23 @@ +package synchronizer + +import ( + "time" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" +) + +// EnvAccessorCtor for configuration parameters +func EnvAccessorCtor() pkgadapter.EnvConfigAccessor { + return &envAccessor{} +} + +type envAccessor struct { + pkgadapter.EnvConfig + + CorrelationKey string `envconfig:"CORRELATION_KEY"` + CorrelationKeyLength int `envconfig:"CORRELATION_KEY_LENGTH"` + ResponseWaitTimeout time.Duration `envconfig:"RESPONSE_WAIT_TIMEOUT"` + + // BridgeIdentifier is the name of the bridge workflow this target is part of + BridgeIdentifier string `envconfig:"EVENTS_BRIDGE_IDENTIFIER"` +} diff --git a/pkg/sources/flow/adapter/synchronizer/storage.go b/pkg/sources/flow/adapter/synchronizer/storage.go new file mode 100644 index 00000000..151fee7e --- /dev/null +++ b/pkg/sources/flow/adapter/synchronizer/storage.go @@ -0,0 +1,53 @@ +package synchronizer + +import ( + "fmt" + "sync" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +// storage holds the map of open connections and corresponding channels. +type storage struct { + sync.Mutex + sessions map[string]chan *cloudevents.Event +} + +// newStorage returns an instance of the sessions storage. +func newStorage() *storage { + return &storage{ + sessions: make(map[string]chan *cloudevents.Event), + } +} + +// add creates the new communication channel and adds it to the session storage. 
+func (s *storage) add(id string) (<-chan *cloudevents.Event, error) { + s.Lock() + defer s.Unlock() + + if _, exists := s.sessions[id]; exists { + return nil, fmt.Errorf("session already exists") + } + + c := make(chan *cloudevents.Event) + s.sessions[id] = c + return c, nil +} + +// delete closes the communication channel and removes it from the storage. +func (s *storage) delete(id string) { + s.Lock() + defer s.Unlock() + + close(s.sessions[id]) + delete(s.sessions, id) +} + +// open returns the communication channel for the session id. +func (s *storage) get(id string) (chan<- *cloudevents.Event, bool) { + s.Lock() + defer s.Unlock() + + session, exists := s.sessions[id] + return session, exists +} diff --git a/pkg/sources/flow/adapter/transformation/adapter.go b/pkg/sources/flow/adapter/transformation/adapter.go new file mode 100644 index 00000000..c6997f40 --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/adapter.go @@ -0,0 +1,240 @@ +package transformation + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "go.uber.org/zap" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/flow" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage" + "github.com/zeiss/typhoon/pkg/metrics" +) + +type envConfig struct { + pkgadapter.EnvConfig + + // Sink URL where to send cloudevents + Sink string `envconfig:"K_SINK"` + + // Transformation specifications + TransformationContext string `envconfig:"TRANSFORMATION_CONTEXT"` + TransformationData string `envconfig:"TRANSFORMATION_DATA"` +} + +// adapter contains Pipelines for CE transformations and CloudEvents client. 
+type adapter struct { + ContextPipeline *Pipeline + DataPipeline *Pipeline + + mt *pkgadapter.MetricTag + sr *metrics.EventProcessingStatsReporter + + sink string + + client cloudevents.Client + logger *zap.SugaredLogger +} + +// ceContext represents CloudEvents context structure but with exported Extensions. +type ceContext struct { + *cloudevents.EventContextV1 `json:",inline"` + Extensions map[string]interface{} `json:"Extensions,omitempty"` +} + +// NewEnvConfig satisfies pkgadapter.EnvConfigConstructor. +func NewEnvConfig() pkgadapter.EnvConfigAccessor { + return &envConfig{} +} + +func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: flow.TransformationResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + metrics.MustRegisterEventProcessingStatsView() + + env := envAcc.(*envConfig) + + trnContext, trnData := []v1alpha1.Transform{}, []v1alpha1.Transform{} + err := json.Unmarshal([]byte(env.TransformationContext), &trnContext) + if err != nil { + logger.Fatalf("Cannot unmarshal context transformation env variable: %v", err) + } + err = json.Unmarshal([]byte(env.TransformationData), &trnData) + if err != nil { + logger.Fatalf("Cannot unmarshal data transformation env variable: %v", err) + } + + sharedStorage := storage.New() + + contextPl, err := newPipeline(trnContext, sharedStorage) + if err != nil { + logger.Fatalf("Cannot create context transformation pipeline: %v", err) + } + + dataPl, err := newPipeline(trnData, sharedStorage) + if err != nil { + logger.Fatalf("Cannot create data transformation pipeline: %v", err) + } + + return &adapter{ + ContextPipeline: contextPl, + DataPipeline: dataPl, + + mt: mt, + sr: metrics.MustNewEventProcessingStatsReporter(mt), + + sink: env.Sink, + client: ceClient, + logger: logger, + } +} + +// Start runs CloudEvent receiver and 
applies transformation Pipeline +// on incoming events. +func (t *adapter) Start(ctx context.Context) error { + t.logger.Info("Starting Transformation adapter") + + var receiver interface{} + receiver = t.receiveAndReply + if t.sink != "" { + ctx = cloudevents.ContextWithTarget(ctx, t.sink) + receiver = t.receiveAndSend + } + + ctx = pkgadapter.ContextWithMetricTag(ctx, t.mt) + + return t.client.StartReceiver(ctx, receiver) +} + +func (t *adapter) receiveAndReply(event cloudevents.Event) (*cloudevents.Event, error) { + ceTypeTag := metrics.TagEventType(event.Type()) + ceSrcTag := metrics.TagEventSource(event.Source()) + + start := time.Now() + defer func() { + t.sr.ReportProcessingLatency(time.Since(start), ceTypeTag, ceSrcTag) + }() + + result, err := t.applyTransformations(event) + if err != nil { + t.sr.ReportProcessingError(false, ceTypeTag, ceSrcTag) + } else { + t.sr.ReportProcessingSuccess(ceTypeTag, ceSrcTag) + } + + return result, err +} + +func (t *adapter) receiveAndSend(ctx context.Context, event cloudevents.Event) error { + ceTypeTag := metrics.TagEventType(event.Type()) + ceSrcTag := metrics.TagEventSource(event.Source()) + + start := time.Now() + defer func() { + t.sr.ReportProcessingLatency(time.Since(start), ceTypeTag, ceSrcTag) + }() + + result, err := t.applyTransformations(event) + if err != nil { + t.sr.ReportProcessingError(false, ceTypeTag, ceSrcTag) + return err + } + + if result := t.client.Send(ctx, *result); !cloudevents.IsACK(result) { + t.sr.ReportProcessingError(false, ceTypeTag, ceSrcTag) + return result + } + + t.sr.ReportProcessingSuccess(ceTypeTag, ceSrcTag) + return nil +} + +func (t *adapter) applyTransformations(event cloudevents.Event) (*cloudevents.Event, error) { + // HTTPTargets sets content type from HTTP headers, i.e.: + // "datacontenttype: application/json; charset=utf-8" + // so we must use "contains" instead of strict equality + if !strings.Contains(event.DataContentType(), cloudevents.ApplicationJSON) { + err := 
fmt.Errorf("CE Content-Type %q is not supported", event.DataContentType()) + t.logger.Errorw("Bad Content-Type", zap.Error(err)) + return nil, err + } + + localContext := ceContext{ + EventContextV1: event.Context.AsV1(), + Extensions: event.Context.AsV1().GetExtensions(), + } + + localContextBytes, err := json.Marshal(localContext) + if err != nil { + t.logger.Errorw("Cannot encode CE context", zap.Error(err)) + return nil, fmt.Errorf("cannot encode CE context: %w", err) + } + + // init indicates if we need to run initial step transformation + init := true + var errs []error + + eventUniqueID := fmt.Sprintf("%s-%s", event.ID(), event.Source()) + + // remove event-related variables after the transformation is done. + // since the storage is shared, flush can be done for one pipeline. + defer t.ContextPipeline.Storage.Flush(eventUniqueID) + + // Run init step such as load Pipeline variables first + eventContext, err := t.ContextPipeline.apply(eventUniqueID, localContextBytes, init) + if err != nil { + errs = append(errs, err) + } + eventPayload, err := t.DataPipeline.apply(eventUniqueID, event.Data(), init) + if err != nil { + errs = append(errs, err) + } + + // CE Context transformation + if eventContext, err = t.ContextPipeline.apply(eventUniqueID, eventContext, !init); err != nil { + errs = append(errs, err) + } + + newContext := ceContext{} + if err := json.Unmarshal(eventContext, &newContext); err != nil { + t.logger.Errorw("Cannot decode CE new context", zap.Error(err)) + return nil, fmt.Errorf("cannot decode CE new context: %w", err) + } + event.Context = newContext + for k, v := range newContext.Extensions { + if err := event.Context.SetExtension(k, v); err != nil { + t.logger.Errorw("Cannot set CE extension", zap.Error(err)) + return nil, fmt.Errorf("cannot set CE extension: %w", err) + } + } + + // CE Data transformation + if eventPayload, err = t.DataPipeline.apply(eventUniqueID, eventPayload, !init); err != nil { + errs = append(errs, err) + } + if err = 
event.SetData(cloudevents.ApplicationJSON, eventPayload); err != nil { + t.logger.Errorw("Cannot set CE data", zap.Error(err)) + return nil, fmt.Errorf("cannot set CE data: %w", err) + } + // Failed transformation operations should not stop event flow + // therefore, just log the errors + if len(errs) != 0 { + t.logger.Errorw("Event transformation errors", zap.Errors("errors", errs)) + } + + return &event, nil +} diff --git a/pkg/sources/flow/adapter/transformation/common/convert/convert.go b/pkg/sources/flow/adapter/transformation/common/convert/convert.go new file mode 100644 index 00000000..7f757267 --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/common/convert/convert.go @@ -0,0 +1,104 @@ +package convert + +import ( + "strconv" + "strings" +) + +// SliceToMap converts string slice into map that can be encoded into JSON. +func SliceToMap(path []string, value interface{}) map[string]interface{} { + var array bool + var index int + i := strings.Index(path[0], "[") + if i > -1 && len(path[0]) > i+1 { + indexStr := path[0][i+1 : len(path[0])-1] + indexInt, err := strconv.Atoi(indexStr) + if err == nil { + index = indexInt + array = true + path[0] = path[0][:i] + } + } + + if len(path) == 1 { + if !array { + return map[string]interface{}{ + path[0]: value, + } + } + arr := make([]interface{}, index+1) + arr[index] = value + return map[string]interface{}{ + path[0]: arr, + } + } + + key := path[0] + path = path[1:] + m := SliceToMap(path, value) + if !array { + return map[string]interface{}{ + key: m, + } + } + arr := make([]interface{}, index+1) + arr[index] = m + return map[string]interface{}{ + key: arr, + } +} + +// MergeJSONWithMap accepts interface (effectively, JSON) and a map and merges them together. +// Source map keys are being overwritten by appendix keys if they overlap. 
+func MergeJSONWithMap(source, appendix interface{}) interface{} { + switch appendixValue := appendix.(type) { + case nil: + return source + case float64, bool, string: + return appendixValue + case []interface{}: + sourceInterface, ok := source.([]interface{}) + if !ok { + return appendixValue + } + resArrLen := len(sourceInterface) + if len(appendixValue) > resArrLen { + resArrLen = len(appendixValue) + } + resArr := make([]interface{}, resArrLen) + for i := range resArr { + var a, b interface{} + if i < len(appendixValue) { + b = appendixValue[i] + } + if i < len(sourceInterface) { + a = sourceInterface[i] + } + resArr[i] = MergeJSONWithMap(a, b) + } + source = resArr + case map[string]interface{}: + switch s := source.(type) { + case float64, bool, string: + return appendixValue + case nil: + source = make(map[string]interface{}) + return MergeJSONWithMap(source, appendixValue) + case map[string]interface{}: + for k, v := range appendixValue { + if k == "" { + return MergeJSONWithMap(s, v) + } + s[k] = MergeJSONWithMap(s[k], v) + } + source = s + case []interface{}: + for k, v := range appendixValue { + if k == "" { + return MergeJSONWithMap(s, v) + } + } + } + } + return source +} diff --git a/pkg/sources/flow/adapter/transformation/common/convert/convert_test.go b/pkg/sources/flow/adapter/transformation/common/convert/convert_test.go new file mode 100644 index 00000000..6e294047 --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/common/convert/convert_test.go @@ -0,0 +1,106 @@ +package convert + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSliceToMap(t *testing.T) { + testCases := []struct { + path string + value string + result map[string]interface{} + }{ + { + path: "foo.bar", + value: "", + result: map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": "", + }, + }, + }, + { + path: "foo.[0].bar", + value: "", + result: map[string]interface{}{ + "foo": 
map[string]interface{}{ + "": []interface{}{ + map[string]interface{}{ + "bar": "", + }, + }, + }, + }, + }, + { + path: "[1].foo", + value: "bar", + result: map[string]interface{}{ + "": []interface{}{ + nil, + map[string]interface{}{ + "foo": "bar", + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.path, func(t *testing.T) { + assert.Equal(t, tc.result, SliceToMap(strings.Split(tc.path, "."), tc.value)) + }) + } +} + +func TestMergeJSONWithMap(t *testing.T) { + testCases := []struct { + source string + appendix string + result interface{} + }{ + { + source: `{"old":"value"}`, + appendix: "foo.bar", + result: map[string]interface{}{ + "old": "value", + "foo": "bar", + }, + }, { + source: `{"old":"value"}`, + appendix: "foo.bar[1].baz", + result: map[string]interface{}{ + "old": "value", + "foo": map[string]interface{}{ + "bar": []interface{}{ + nil, + "baz", + }, + }, + }, + }, { + source: `{"old":"value"}`, + appendix: "[1].foo.bar", + result: []interface{}{ + nil, + map[string]interface{}{ + "foo": "bar", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.appendix, func(t *testing.T) { + var data interface{} + assert.NoError(t, json.Unmarshal([]byte(tc.source), &data)) + s := strings.Split(tc.appendix, ".") + appendix := SliceToMap(s[:len(s)-1], s[len(s)-1]) + assert.Equal(t, MergeJSONWithMap(data, appendix), tc.result) + }) + } +} diff --git a/pkg/sources/flow/adapter/transformation/common/storage/storage.go b/pkg/sources/flow/adapter/transformation/common/storage/storage.go new file mode 100644 index 00000000..08b92c07 --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/common/storage/storage.go @@ -0,0 +1,69 @@ +package storage + +import ( + "sync" +) + +// Storage is a simple object that provides thread safe +// methods to read and write into a map. +type Storage struct { + data map[string]map[string]interface{} + mux sync.RWMutex +} + +// New returns an instance of Storage. 
+func New() *Storage { + return &Storage{ + data: make(map[string]map[string]interface{}), + mux: sync.RWMutex{}, + } +} + +// Set writes a value interface to a string key. +func (s *Storage) Set(eventID, key string, value interface{}) { + s.mux.Lock() + defer s.mux.Unlock() + if s.data[eventID] == nil { + s.data[eventID] = make(map[string]interface{}) + } + s.data[eventID][key] = value +} + +// Get reads value by a key. +func (s *Storage) Get(eventID string, key string) interface{} { + s.mux.RLock() + defer s.mux.RUnlock() + if s.data[eventID] == nil { + return nil + } + return s.data[eventID][key] +} + +// ListEventVariables returns the slice of variables created for EventID. +func (s *Storage) ListEventVariables(eventID string) []string { + s.mux.RLock() + defer s.mux.RUnlock() + list := []string{} + for k := range s.data[eventID] { + list = append(list, k) + } + return list +} + +// ListEventIDs returns the list of stored event IDs. +func (s *Storage) ListEventIDs() []string { + s.mux.RLock() + defer s.mux.RUnlock() + list := []string{} + for k := range s.data { + list = append(list, k) + } + return list +} + +// Flush removes variables by their parent event ID. +func (s *Storage) Flush(eventID string) { + s.mux.Lock() + defer s.mux.Unlock() + delete(s.data, eventID) +} diff --git a/pkg/sources/flow/adapter/transformation/common/utils.go b/pkg/sources/flow/adapter/transformation/common/utils.go new file mode 100644 index 00000000..f93aaf36 --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/common/utils.go @@ -0,0 +1,55 @@ +package common + +// ReadValue returns the source object item located at the requested path. 
+func ReadValue(source interface{}, path map[string]interface{}) interface{} { + var result interface{} + for k, v := range path { + switch value := v.(type) { + case float64, bool, string: + sourceMap, ok := source.(map[string]interface{}) + if !ok { + break + } + result = sourceMap[k] + case []interface{}: + if k != "" { + // array is inside the object + // {"foo":[{},{},{}]} + sourceMap, ok := source.(map[string]interface{}) + if !ok { + break + } + source, ok = sourceMap[k] + if !ok { + break + } + } + // array is a root object + // [{},{},{}] + sourceArr, ok := source.([]interface{}) + if !ok { + break + } + + index := len(value) - 1 + if index >= len(sourceArr) { + break + } + result = ReadValue(sourceArr[index], value[index].(map[string]interface{})) + case map[string]interface{}: + if k == "" { + result = source + break + } + sourceMap, ok := source.(map[string]interface{}) + if !ok { + break + } + if _, ok := sourceMap[k]; !ok { + break + } + result = ReadValue(sourceMap[k], value) + } + } + return result +} diff --git a/pkg/sources/flow/adapter/transformation/pipeline.go b/pkg/sources/flow/adapter/transformation/pipeline.go new file mode 100644 index 00000000..dbc6708a --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/pipeline.go @@ -0,0 +1,83 @@ +package transformation + +import ( + "fmt" + "strings" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer/add" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer/delete" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer/parse" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer/shift" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer/store" +) + +const ( + defaultEventPathSeparator = "." 
+) + +// Pipeline is a set of Transformations that are +// sequentially applied to JSON data. +type Pipeline struct { + Transformers []transformer.Transformer + Storage *storage.Storage +} + +// register loads available Transformation into a named map. +func register() map[string]transformer.Transformer { + transformations := make(map[string]transformer.Transformer) + + add.Register(transformations) + delete.Register(transformations) + shift.Register(transformations) + store.Register(transformations) + parse.Register(transformations) + + return transformations +} + +// newPipeline loads available Transformations and creates a Pipeline. +func newPipeline(transformations []v1alpha1.Transform, storage *storage.Storage) (*Pipeline, error) { + availableTransformers := register() + pipeline := []transformer.Transformer{} + + for _, transformation := range transformations { + operation, exist := availableTransformers[transformation.Operation] + if !exist { + return nil, fmt.Errorf("transformation %q not found", transformation.Operation) + } + for _, kv := range transformation.Paths { + separator := defaultEventPathSeparator + if kv.Separator != "" { + separator = kv.Separator + } + transformer := operation.New(kv.Key, kv.Value, separator) + transformer.SetStorage(storage) + pipeline = append(pipeline, transformer) + } + } + + return &Pipeline{ + Transformers: pipeline, + Storage: storage, + }, nil +} + +// Apply applies Pipeline transformations. 
+func (p *Pipeline) apply(eventID string, data []byte, init bool) ([]byte, error) { + var err error + var errs []string + for _, v := range p.Transformers { + if init == v.InitStep() { + if data, err = v.Apply(eventID, data); err != nil { + errs = append(errs, err.Error()) + } + } + } + if len(errs) != 0 { + return data, fmt.Errorf(strings.Join(errs, ",")) + } + return data, nil +} diff --git a/pkg/sources/flow/adapter/transformation/transformer/add/add.go b/pkg/sources/flow/adapter/transformation/transformer/add/add.go new file mode 100644 index 00000000..c86df84a --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/transformer/add/add.go @@ -0,0 +1,141 @@ +package add + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/convert" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer" +) + +var _ transformer.Transformer = (*Add)(nil) + +// Add object implements Transformer interface. +type Add struct { + Path string + Value string + Separator string + + variables *storage.Storage +} + +// InitStep is used to figure out if this operation should +// run before main Transformations. For example, Store +// operation needs to run first to load all Pipeline variables. +var InitStep bool = false + +// operationName is used to identify this transformation. +var operationName string = "add" + +// Register adds this transformation to the map which will +// be used to create Transformation pipeline. +func Register(m map[string]transformer.Transformer) { + m[operationName] = &Add{} +} + +// SetStorage sets a shared Storage with Pipeline variables. +func (a *Add) SetStorage(storage *storage.Storage) { + a.variables = storage +} + +// InitStep returns "true" if this Transformation should run +// as init step. +func (a *Add) InitStep() bool { + return InitStep +} + +// New returns a new instance of Add object. 
// New returns a new instance of Add object.
func (a *Add) New(key, value, separator string) transformer.Transformer {
	return &Add{
		Path:      key,
		Value:     value,
		Separator: separator,

		variables: a.variables,
	}
}

// Apply is a main method of Transformation that adds any type of
// variables into existing JSON.
func (a *Add) Apply(eventID string, data []byte) ([]byte, error) {
	// Build a nested map addressed by Path holding the (variable-expanded)
	// value, then merge it into the decoded event.
	input := convert.SliceToMap(strings.Split(a.Path, a.Separator), a.composeValue(eventID))
	var event interface{}
	if err := json.Unmarshal(data, &event); err != nil {
		return data, err
	}

	result := convert.MergeJSONWithMap(event, input)
	output, err := json.Marshal(result)
	if err != nil {
		return data, err
	}

	return output, nil
}

// retrieveVariable returns the Pipeline variable stored under key for this
// event; when no value is stored it falls back to the key string itself.
func (a *Add) retrieveVariable(eventID, key string) interface{} {
	if value := a.variables.Get(eventID, key); value != nil {
		return value
	}
	return key
}

// composeValue expands Pipeline variable references found inside a.Value for
// the given event. A value that is exactly one variable key returns the
// stored value with its original type; otherwise stored values are spliced
// into the string. Parentheses surrounding a reference mark an optional
// section that is dropped entirely when the variable has no stored value;
// a backslash before a bracket escapes it. (Bracket handling below is
// index-sensitive — presumably it assumes at most one bracket pair per
// reference; TODO confirm.)
func (a *Add) composeValue(eventID string) interface{} {
	result := a.Value
	for _, key := range a.variables.ListEventVariables(eventID) {
		// limit the number of iterations to prevent the loop if
		// "add" variable is not updating the result (variable is not defined).
		variableKeysInResult := strings.Count(result, key)
		for i := 0; i <= variableKeysInResult; i++ {
			keyIndex := strings.Index(result, key)
			if keyIndex == -1 {
				continue
			}

			storedValue := a.retrieveVariable(eventID, key)

			// The whole value is a single variable reference: return the
			// stored value as-is, preserving its type.
			if result == key {
				return storedValue
			}

			// Locate the nearest "(" before and ")" after the reference.
			openingBracketIndex := -1
			closingBracketIndex := -1
			for i := keyIndex; i >= 0; i-- {
				if string(result[i]) == "(" {
					openingBracketIndex = i
					break
				}
			}
			for i := keyIndex; i < len(result); i++ {
				if string(result[i]) == ")" {
					closingBracketIndex = i
					break
				}
			}

			// there is no brackets in the value
			if (openingBracketIndex == -1 || closingBracketIndex == -1) ||
				// brackets are screened with "\" symbol
				((openingBracketIndex > 0 && string(result[openingBracketIndex-1]) == "\\") ||
					string(result[closingBracketIndex-1]) == "\\") ||
				// brackets are not surrounding the key
				!(openingBracketIndex < keyIndex && closingBracketIndex >= keyIndex+len(key)) {
				result = fmt.Sprintf("%s%v%s", result[:keyIndex], storedValue, result[keyIndex+len(key):])
				continue
			}

			if storedValue == key {
				// stored value that equals the variable key means no stored value is available
				result = fmt.Sprintf("%s%s", result[:openingBracketIndex], result[closingBracketIndex+1:])
				continue
			}

			// Remove the surrounding brackets (shifting indices left by one
			// each time), then substitute the stored value for the key.
			result = result[:openingBracketIndex] + result[openingBracketIndex+1:]
			result = result[:closingBracketIndex-1] + result[closingBracketIndex:]
			result = fmt.Sprintf("%s%v%s", result[:keyIndex-1], storedValue, result[keyIndex+len(key)-1:])
		}
	}
	return result
}

package delete

import (
	"encoding/json"
	"fmt"
	"strconv"
"github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer" +) + +var _ transformer.Transformer = (*Delete)(nil) + +// Delete object implements Transformer interface. +type Delete struct { + Path string + Value string + Type string + Separator string + + variables *storage.Storage +} + +// InitStep is used to figure out if this operation should +// run before main Transformations. For example, Store +// operation needs to run first to load all Pipeline variables. +var InitStep bool = false + +// operationName is used to identify this transformation. +var operationName string = "delete" + +// Register adds this transformation to the map which will +// be used to create Transformation pipeline. +func Register(m map[string]transformer.Transformer) { + m[operationName] = &Delete{} +} + +// SetStorage sets a shared Storage with Pipeline variables. +func (d *Delete) SetStorage(storage *storage.Storage) { + d.variables = storage +} + +// InitStep returns "true" if this Transformation should run +// as init step. +func (d *Delete) InitStep() bool { + return InitStep +} + +// New returns a new instance of Delete object. +func (d *Delete) New(key, value, separator string) transformer.Transformer { + return &Delete{ + Path: key, + Value: value, + Separator: separator, + + variables: d.variables, + } +} + +// Apply is a main method of Transformation that removed any type of +// variables from existing JSON. 
+func (d *Delete) Apply(eventID string, data []byte) ([]byte, error) { + d.Value = d.retrieveString(eventID, d.Value) + + result, err := d.parse(data, "", "") + if err != nil { + return data, err + } + + output, err := json.Marshal(result) + if err != nil { + return data, err + } + + return output, nil +} + +func (d *Delete) retrieveString(eventID, key string) string { + if value := d.variables.Get(eventID, key); value != nil { + if str, ok := value.(string); ok { + return str + } + } + return key +} + +func (d *Delete) parse(data interface{}, key, path string) (interface{}, error) { + output := make(map[string]interface{}) + // TODO: keep only one filter call + if d.filter(path, data) { + return nil, nil + } + switch value := data.(type) { + case []byte: + var m interface{} + if err := json.Unmarshal(value, &m); err != nil { + return nil, fmt.Errorf("unmarshal err: %v", err) + } + o, err := d.parse(m, key, path) + if err != nil { + return nil, fmt.Errorf("recursive call in []bytes case: %v", err) + } + return o, nil + case float64, bool, string, nil: + return value, nil + case []interface{}: + slice := []interface{}{} + for i, v := range value { + o, err := d.parse(v, key, fmt.Sprintf("%s[%d]", path, i)) + if err != nil { + return nil, fmt.Errorf("recursive call in []interface case: %v", err) + } + slice = append(slice, o) + } + return slice, nil + case map[string]interface{}: + for k, v := range value { + subPath := fmt.Sprintf("%s.%s", path, k) + if d.filter(subPath, v) { + continue + } + o, err := d.parse(v, k, subPath) + if err != nil { + return nil, fmt.Errorf("recursive call in map[]interface case: %v", err) + } + output[k] = o + } + } + + return output, nil +} + +func (d *Delete) filter(path string, value interface{}) bool { + switch { + case d.Path != "" && d.Value != "": + return d.filterPathAndValue(path, value) + case d.Path != "": + return d.filterPath(path) + case d.Value != "": + return d.filterValue(value) + } + // consider empty key and path as 
"delete any" + return true +} + +func (d *Delete) filterPath(path string) bool { + return d.Separator+d.Path == path +} + +func (d *Delete) filterValue(value interface{}) bool { + switch v := value.(type) { + case string: + return v == d.Value + case float64: + return d.Value == strconv.FormatFloat(v, 'f', -1, 64) + case bool: + return d.Value == fmt.Sprintf("%t", v) + } + return false +} + +func (d *Delete) filterPathAndValue(path string, value interface{}) bool { + return d.filterPath(path) && d.filterValue(value) +} diff --git a/pkg/sources/flow/adapter/transformation/transformer/parse/parse.go b/pkg/sources/flow/adapter/transformation/transformer/parse/parse.go new file mode 100644 index 00000000..d2888aee --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/transformer/parse/parse.go @@ -0,0 +1,93 @@ +package parse + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/convert" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer" +) + +var _ transformer.Transformer = (*Parse)(nil) + +// Parse object implements Transformer interface. +type Parse struct { + Path string + Value string + Separator string + + variables *storage.Storage +} + +// InitStep is used to figure out if this operation should +// run before main Transformations. For example, Store +// operation needs to run first to load all Pipeline variables. +var InitStep bool = true + +// operationName is used to identify this transformation. +var operationName string = "parse" + +// Register adds this transformation to the map which will +// be used to create Transformation pipeline. +func Register(m map[string]transformer.Transformer) { + m[operationName] = &Parse{} +} + +// SetStorage sets a shared Storage with Pipeline variables. 
+func (p *Parse) SetStorage(storage *storage.Storage) { + p.variables = storage +} + +// InitStep returns "true" if this Transformation should run +// as init step. +func (p *Parse) InitStep() bool { + return InitStep +} + +// New returns a new instance of Parse object. +func (p *Parse) New(key, value, separator string) transformer.Transformer { + return &Parse{ + Path: key, + Value: value, + Separator: separator, + + variables: p.variables, + } +} + +// Apply is a main method of Transformation that parse JSON values +// into variables that can be used by other Transformations in a pipeline. +func (p *Parse) Apply(eventID string, data []byte) ([]byte, error) { + path := convert.SliceToMap(strings.Split(p.Path, p.Separator), "") + + switch p.Value { + case "json", "JSON": + var event interface{} + if err := json.Unmarshal(data, &event); err != nil { + return data, err + } + jsonValue, err := parseJSON(common.ReadValue(event, path)) + if err != nil { + return data, err + } + newObject := convert.SliceToMap(strings.Split(p.Path, p.Separator), jsonValue) + return json.Marshal(convert.MergeJSONWithMap(event, newObject)) + default: + return data, fmt.Errorf("parse operation does not support %q type of value", p.Value) + } +} + +func parseJSON(data interface{}) (interface{}, error) { + str, ok := data.(string) + if !ok { + return nil, fmt.Errorf("unable to cast the value to string type") + } + var object interface{} + if err := json.Unmarshal([]byte(str), &object); err != nil { + return nil, fmt.Errorf("failed to unmarshal value: %w", err) + } + return object, nil +} diff --git a/pkg/sources/flow/adapter/transformation/transformer/shift/shift.go b/pkg/sources/flow/adapter/transformation/transformer/shift/shift.go new file mode 100644 index 00000000..f4d0cd5c --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/transformer/shift/shift.go @@ -0,0 +1,193 @@ +package shift + +import ( + "encoding/json" + "strings" + + 
"github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/convert" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage" + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer" +) + +var _ transformer.Transformer = (*Shift)(nil) + +// Shift object implements Transformer interface. +type Shift struct { + Path string + NewPath string + Value string + Separator string + + variables *storage.Storage +} + +const delimeter string = ":" + +// InitStep is used to figure out if this operation should +// run before main Transformations. For example, Store +// operation needs to run first to load all Pipeline variables. +var InitStep bool = false + +// operationName is used to identify this transformation. +var operationName string = "shift" + +// Register adds this transformation to the map which will +// be used to create Transformation pipeline. +func Register(m map[string]transformer.Transformer) { + m[operationName] = &Shift{} +} + +// SetStorage sets a shared Storage with Pipeline variables. +func (s *Shift) SetStorage(storage *storage.Storage) { + s.variables = storage +} + +// InitStep returns "true" if this Transformation should run +// as init step. +func (s *Shift) InitStep() bool { + return InitStep +} + +// New returns a new instance of Shift object. +func (s *Shift) New(key, value, separator string) transformer.Transformer { + // doubtful scheme, review needed + keys := strings.Split(key, delimeter) + if len(keys) != 2 { + return nil + } + return &Shift{ + Path: keys[0], + NewPath: keys[1], + Value: value, + Separator: separator, + + variables: s.variables, + } +} + +// Apply is a main method of Transformation that moves existing +// values to a new locations. 
// Apply is a main method of Transformation that moves existing
// values to a new locations.
func (s *Shift) Apply(eventID string, data []byte) ([]byte, error) {
	oldPath := convert.SliceToMap(strings.Split(s.Path, s.Separator), "")

	var event interface{}
	if err := json.Unmarshal(data, &event); err != nil {
		return data, err
	}

	// Remove the value from its old location; newEvent is the event with
	// the value extracted.
	newEvent, value := extractValue(event, oldPath)
	if s.Value != "" {
		// Conditional shift: only move the value when it equals the
		// configured (possibly variable-resolved) Value.
		if !equal(s.retrieveInterface(eventID, s.Value), value) {
			return data, nil
		}
	}
	if value == nil {
		// Nothing found at the source path: leave the event untouched.
		return data, nil
	}

	newPath := convert.SliceToMap(strings.Split(s.NewPath, s.Separator), value)
	result := convert.MergeJSONWithMap(newEvent, newPath)
	output, err := json.Marshal(result)
	if err != nil {
		return data, err
	}

	return output, nil
}

// retrieveInterface returns the Pipeline variable stored under key for this
// event, or key itself when no value is stored.
func (s *Shift) retrieveInterface(eventID, key string) interface{} {
	if value := s.variables.Get(eventID, key); value != nil {
		return value
	}
	return key
}

// extractValue removes the item addressed by path (a SliceToMap-style
// skeleton) from source, returning the modified source map and the removed
// value. It mutates the maps/slices it traverses in place.
func extractValue(source interface{}, path map[string]interface{}) (map[string]interface{}, interface{}) {
	var ok bool
	var result interface{}
	sourceMap := make(map[string]interface{})
	for k, v := range path {
		switch value := v.(type) {
		case float64, bool, string:
			sourceMap, ok = source.(map[string]interface{})
			if !ok {
				break
			}
			result = sourceMap[k]
			delete(sourceMap, k)
		case []interface{}:
			if k != "" {
				// array is inside the object
				// {"foo":[{},{},{}]}
				sourceMap, ok = source.(map[string]interface{})
				if !ok {
					break
				}
				source, ok = sourceMap[k]
				if !ok {
					break
				}
			}
			// array is a root object
			// [{},{},{}]
			sourceArr, ok := source.([]interface{})
			if !ok {
				break
			}

			// The encoded array index is the last element of the path slice.
			index := len(value) - 1
			if index >= len(sourceArr) {
				break
			}

			m, ok := value[index].(map[string]interface{})
			if ok {
				// NOTE(review): unchecked assertion — panics if the array
				// element at index is not a map; TODO confirm inputs.
				sourceArr[index], result = extractValue(sourceArr[index].(map[string]interface{}), m)
				sourceMap[k] = sourceArr
				break
			}
			result = sourceArr[index]
			sourceMap[k] = sourceArr[:index]
			// NOTE(review): this condition is always true after the bound
			// check above, so the element is always spliced out.
			if len(sourceArr) > index {
				sourceMap[k] = append(sourceArr[:index], sourceArr[index+1:]...)
			}
		case map[string]interface{}:
			if k == "" {
				// Empty key addresses the current level itself.
				result = source
				break
			}
			sourceMap, ok = source.(map[string]interface{})
			if !ok {
				break
			}
			if _, ok := sourceMap[k]; !ok {
				break
			}
			sourceMap[k], result = extractValue(sourceMap[k], value)
		case nil:
			sourceMap[k] = nil
		}
	}
	return sourceMap, result
}

// equal compares two values; only string, bool and float64 pairs can be
// equal — any other type combination reports false.
func equal(a, b interface{}) bool {
	switch value := b.(type) {
	case string:
		v, ok := a.(string)
		if ok && v == value {
			return true
		}
	case bool:
		v, ok := a.(bool)
		if ok && v == value {
			return true
		}
	case float64:
		v, ok := a.(float64)
		if ok && v == value {
			return true
		}
	}
	return false
}

package store

import (
	"encoding/json"
	"strings"

	"github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common"
	"github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/convert"
	"github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage"
	"github.com/zeiss/typhoon/pkg/flow/adapter/transformation/transformer"
)

var _ transformer.Transformer = (*Store)(nil)

// Store object implements Transformer interface.
type Store struct {
	Path      string
	Value     string
	Separator string

	variables *storage.Storage
}

// InitStep is used to figure out if this operation should
// run before main Transformations. For example, Store
// operation needs to run first to load all Pipeline variables.
var InitStep bool = true

// operationName is used to identify this transformation.
var operationName string = "store"

// Register adds this transformation to the map which will
// be used to create Transformation pipeline.
+func Register(m map[string]transformer.Transformer) { + m[operationName] = &Store{} +} + +// SetStorage sets a shared Storage with Pipeline variables. +func (s *Store) SetStorage(storage *storage.Storage) { + s.variables = storage +} + +// InitStep returns "true" if this Transformation should run +// as init step. +func (s *Store) InitStep() bool { + return InitStep +} + +// New returns a new instance of Store object. +func (s *Store) New(key, value, separator string) transformer.Transformer { + return &Store{ + Path: key, + Value: value, + Separator: separator, + + variables: s.variables, + } +} + +// Apply is a main method of Transformation that stores JSON values +// into variables that can be used by other Transformations in a pipeline. +func (s *Store) Apply(eventID string, data []byte) ([]byte, error) { + path := convert.SliceToMap(strings.Split(s.Value, s.Separator), "") + + var event interface{} + if err := json.Unmarshal(data, &event); err != nil { + return data, err + } + + value := common.ReadValue(event, path) + + s.variables.Set(eventID, s.Path, value) + + return data, nil +} diff --git a/pkg/sources/flow/adapter/transformation/transformer/transformer.go b/pkg/sources/flow/adapter/transformation/transformer/transformer.go new file mode 100644 index 00000000..4a6059d8 --- /dev/null +++ b/pkg/sources/flow/adapter/transformation/transformer/transformer.go @@ -0,0 +1,14 @@ +package transformer + +import ( + "github.com/zeiss/typhoon/pkg/flow/adapter/transformation/common/storage" +) + +// Transformer is an interface that contains common methods +// to work with JSON data. 
+type Transformer interface { + New(key, value, separator string) Transformer + Apply(eventID string, data []byte) ([]byte, error) + SetStorage(*storage.Storage) + InitStep() bool +} diff --git a/pkg/sources/flow/adapter/xmltojsontransformation/adapter.go b/pkg/sources/flow/adapter/xmltojsontransformation/adapter.go new file mode 100644 index 00000000..c115c208 --- /dev/null +++ b/pkg/sources/flow/adapter/xmltojsontransformation/adapter.go @@ -0,0 +1,128 @@ +package xmltojsontransformation + +import ( + "bytes" + "context" + "encoding/xml" + "errors" + "io" + + "go.uber.org/zap" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + xj "github.com/basgys/goxml2json" + + "github.com/zeiss/typhoon/pkg/apis/flow" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + "github.com/zeiss/typhoon/pkg/metrics" + targetce "github.com/zeiss/typhoon/pkg/targets/adapter/cloudevents" +) + +// EnvAccessorCtor for configuration parameters +func EnvAccessorCtor() pkgadapter.EnvConfigAccessor { + return &envAccessor{} +} + +type envAccessor struct { + pkgadapter.EnvConfig + + // BridgeIdentifier is the name of the bridge workflow this target is part of + BridgeIdentifier string `envconfig:"EVENTS_BRIDGE_IDENTIFIER"` + // CloudEvents responses parametrization + CloudEventPayloadPolicy string `envconfig:"EVENTS_PAYLOAD_POLICY" default:"error"` + // Sink defines the target sink for the events. If no Sink is defined the + // events are replied back to the sender. 
+ Sink string `envconfig:"K_SINK"` +} + +// NewAdapter adapter implementation +func NewAdapter(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: flow.XMLToJSONTransformationResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + metrics.MustRegisterEventProcessingStatsView() + + env := envAcc.(*envAccessor) + + replier, err := targetce.New(env.Component, logger.Named("replier"), + targetce.ReplierWithStatefulHeaders(env.BridgeIdentifier), + targetce.ReplierWithStaticResponseType(v1alpha1.EventTypeXMLToJSONGenericResponse), + targetce.ReplierWithPayloadPolicy(targetce.PayloadPolicy(env.CloudEventPayloadPolicy))) + if err != nil { + logger.Panicf("Error creating CloudEvents replier: %v", err) + } + + return &Adapter{ + sink: env.Sink, + replier: replier, + ceClient: ceClient, + logger: logger, + + mt: mt, + sr: metrics.MustNewEventProcessingStatsReporter(mt), + } +} + +var _ pkgadapter.Adapter = (*Adapter)(nil) + +type Adapter struct { + sink string + replier *targetce.Replier + ceClient cloudevents.Client + logger *zap.SugaredLogger + + mt *pkgadapter.MetricTag + sr *metrics.EventProcessingStatsReporter +} + +// Start is a blocking function and will return if an error occurs +// or the context is cancelled. 
+func (a *Adapter) Start(ctx context.Context) error { + a.logger.Info("Starting XMLToJSONTransformation Adapter") + ctx = pkgadapter.ContextWithMetricTag(ctx, a.mt) + return a.ceClient.StartReceiver(ctx, a.dispatch) +} + +func (a *Adapter) dispatch(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + if !isValidXML(event.Data()) { + return a.replier.Error(&event, targetce.ErrorCodeRequestValidation, + errors.New("invalid XML"), nil) + } + + xml := bytes.NewReader(event.Data()) + jsn, err := xj.Convert(xml) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + readBuf, err := io.ReadAll(jsn) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + if err := event.SetData(cloudevents.ApplicationJSON, readBuf); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + if a.sink != "" { + if result := a.ceClient.Send(ctx, event); !cloudevents.IsACK(result) { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, result, nil) // err is always nil on this path; report the failed send result instead + } + return nil, cloudevents.ResultACK + } + + return &event, cloudevents.ResultACK +} + +func isValidXML(data []byte) bool { + return xml.Unmarshal(data, new(interface{})) == nil +} diff --git a/pkg/sources/flow/adapter/xslttransformation/adapter.go b/pkg/sources/flow/adapter/xslttransformation/adapter.go new file mode 100644 index 00000000..8392ef8a --- /dev/null +++ b/pkg/sources/flow/adapter/xslttransformation/adapter.go @@ -0,0 +1,153 @@ +//go:build !noclibs + +package xslttransformation + +import ( + "context" + "errors" + "fmt" + "runtime" + + xslt "github.com/wamuir/go-xslt" + "go.uber.org/zap" + + cloudevents "github.com/cloudevents/sdk-go/v2" + pkgadapter "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/pkg/logging" + + "github.com/zeiss/typhoon/pkg/apis/flow" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + "github.com/zeiss/typhoon/pkg/metrics" + 
targetce "github.com/zeiss/typhoon/pkg/targets/adapter/cloudevents" +) + +var _ pkgadapter.Adapter = (*xsltTransformAdapter)(nil) + +type xsltTransformAdapter struct { + defaultXSLT *xslt.Stylesheet + xsltOverride bool + + replier *targetce.Replier + ceClient cloudevents.Client + logger *zap.SugaredLogger + sink string + + mt *pkgadapter.MetricTag + sr *metrics.EventProcessingStatsReporter +} + +// NewTarget adapter implementation +func NewTarget(ctx context.Context, envAcc pkgadapter.EnvConfigAccessor, ceClient cloudevents.Client) pkgadapter.Adapter { + logger := logging.FromContext(ctx) + + mt := &pkgadapter.MetricTag{ + ResourceGroup: flow.XSLTTransformationResource.String(), + Namespace: envAcc.GetNamespace(), + Name: envAcc.GetName(), + } + + metrics.MustRegisterEventProcessingStatsView() + + env := envAcc.(*envAccessor) + + if err := env.validate(); err != nil { + logger.Panicf("Configuration error: %v", err) + } + + replier, err := targetce.New(env.Component, logger.Named("replier"), + targetce.ReplierWithStatefulHeaders(env.BridgeIdentifier), + targetce.ReplierWithStaticDataContentType(cloudevents.ApplicationXML), + targetce.ReplierWithStaticErrorDataContentType(*cloudevents.StringOfApplicationJSON()), + targetce.ReplierWithPayloadPolicy(targetce.PayloadPolicy(targetce.PayloadPolicyAlways))) + if err != nil { + logger.Panicf("Error creating CloudEvents replier: %v", err) + } + + adapter := &xsltTransformAdapter{ + xsltOverride: env.AllowXSLTOverride, + + replier: replier, + ceClient: ceClient, + logger: logger, + sink: env.Sink, + + mt: mt, + sr: metrics.MustNewEventProcessingStatsReporter(mt), + } + + if env.XSLT != "" { + adapter.defaultXSLT, err = xslt.NewStylesheet([]byte(env.XSLT)) + if err != nil { + logger.Panicf("XSLT validation error: %v", err) + } + + runtime.SetFinalizer(adapter.defaultXSLT, (*xslt.Stylesheet).Close) + } + + return adapter +} + +// Start is a blocking function and will return if an error occurs +// or the context is cancelled. 
+func (a *xsltTransformAdapter) Start(ctx context.Context) error { + a.logger.Info("Starting XSLT transformer") + ctx = pkgadapter.ContextWithMetricTag(ctx, a.mt) + return a.ceClient.StartReceiver(ctx, a.dispatch) +} + +func (a *xsltTransformAdapter) dispatch(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + isStructuredTransform := event.Type() == v1alpha1.EventTypeXSLTTransformation + if isStructuredTransform && !a.xsltOverride { + return a.replier.Error(&event, targetce.ErrorCodeRequestValidation, + errors.New("it is not allowed to override XSLT per CloudEvent"), nil) + } + + isXML := event.DataMediaType() == cloudevents.ApplicationXML + + var style *xslt.Stylesheet + var xmlin []byte + var err error + + switch { + case isStructuredTransform: + req := &XSLTTransformationStructuredRequest{} + if err := event.DataAs(req); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + + xmlin = []byte(req.XML) + style, err = xslt.NewStylesheet([]byte(req.XSLT)) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestParsing, err, nil) + } + defer style.Close() + + case isXML: + xmlin = event.DataEncoded + style = a.defaultXSLT // NOTE(review): may be nil when XSLT override is allowed and no default stylesheet is configured - confirm upstream validation prevents this + + default: + return a.replier.Error(&event, targetce.ErrorCodeRequestValidation, + errors.New("unexpected type or media-type for the incoming event"), nil) + } + + res, err := style.Transform(xmlin) + if err != nil { + return a.replier.Error(&event, targetce.ErrorCodeRequestValidation, + fmt.Errorf("error processing XML with XSLT: %v", err), nil) + } + + if a.sink != "" { + event.SetType(event.Type() + ".response") + if err := event.SetData(cloudevents.ApplicationXML, res); err != nil { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, err, nil) + } + + if result := a.ceClient.Send(ctx, event); !cloudevents.IsACK(result) { + return a.replier.Error(&event, targetce.ErrorCodeAdapterProcess, result, "sending the cloudevent to the sink") // err is always nil on this path; report the failed send result instead 
+ } + return nil, cloudevents.ResultACK + } + + return a.replier.Ok(&event, res, targetce.ResponseWithDataContentType(cloudevents.ApplicationXML)) +} diff --git a/pkg/sources/flow/adapter/xslttransformation/config.go b/pkg/sources/flow/adapter/xslttransformation/config.go new file mode 100644 index 00000000..51fa4d75 --- /dev/null +++ b/pkg/sources/flow/adapter/xslttransformation/config.go @@ -0,0 +1,35 @@ +//go:build !noclibs + +package xslttransformation + +import ( + "errors" + + pkgadapter "knative.dev/eventing/pkg/adapter/v2" +) + +// EnvAccessorCtor for configuration parameters +func EnvAccessorCtor() pkgadapter.EnvConfigAccessor { + return &envAccessor{} +} + +type envAccessor struct { + pkgadapter.EnvConfig + // XSLT document that will be used by default for transformation. + XSLT string `envconfig:"XSLTTRANSFORMATION_XSLT"` + // If set to true, enables consuming structured CloudEvents that include + // fields for the XML and XSLT field. + AllowXSLTOverride bool `envconfig:"XSLTTRANSFORMATION_ALLOW_XSLT_OVERRIDE" required:"true"` + // BridgeIdentifier is the name of the bridge workflow this target is part of + BridgeIdentifier string `envconfig:"EVENTS_BRIDGE_IDENTIFIER"` + // Sink defines the target sink for the events. If no Sink is defined the + // events are replied back to the sender. 
+ Sink string `envconfig:"K_SINK"` +} + +func (e *envAccessor) validate() error { + if !e.AllowXSLTOverride && e.XSLT == "" { + return errors.New("if XSLT cannot be overridden by CloudEvent payloads, configured XSLT cannot be empty") + } + return nil +} diff --git a/pkg/sources/flow/adapter/xslttransformation/types.go b/pkg/sources/flow/adapter/xslttransformation/types.go new file mode 100644 index 00000000..34bd9166 --- /dev/null +++ b/pkg/sources/flow/adapter/xslttransformation/types.go @@ -0,0 +1,10 @@ +//go:build !noclibs + +package xslttransformation + +// XSLTTransformationStructuredRequest contains an opinionated structure +// that informs both the XML and XSLT to transform. +type XSLTTransformationStructuredRequest struct { + XML string `json:"xml"` + XSLT string `json:"xslt,omitempty"` +} diff --git a/pkg/sources/flow/reconciler/jqtransformation/adapter.go b/pkg/sources/flow/reconciler/jqtransformation/adapter.go new file mode 100644 index 00000000..66ad3143 --- /dev/null +++ b/pkg/sources/flow/reconciler/jqtransformation/adapter.go @@ -0,0 +1,66 @@ +package jqtransformation + +import ( + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envQuery = "JQ_QUERY" + envEventsPayloadPolicy = "EVENTS_PAYLOAD_POLICY" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. 
+type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/jqtransformation-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. +func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.JQTransformation) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(o *v1alpha1.JQTransformation) []corev1.EnvVar { + env := []corev1.EnvVar{ + { + Name: envQuery, + Value: o.Spec.Query, + }, + { + Name: common.EnvBridgeID, + Value: common.GetStatefulBridgeID(o), + }, + } + + if o.Spec.EventOptions != nil && o.Spec.EventOptions.PayloadPolicy != nil { + env = append(env, corev1.EnvVar{ + Name: envEventsPayloadPolicy, + Value: string(*o.Spec.EventOptions.PayloadPolicy), + }) + } + + return env +} diff --git a/pkg/sources/flow/reconciler/jqtransformation/controller.go b/pkg/sources/flow/reconciler/jqtransformation/controller.go new file mode 100644 index 00000000..5d974a77 --- /dev/null +++ b/pkg/sources/flow/reconciler/jqtransformation/controller.go @@ -0,0 +1,52 @@ +package jqtransformation + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 
"github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/jqtransformation" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/jqtransformation" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.JQTransformation)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. + adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.JQTransformation]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().JQTransformations, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/sources/flow/reconciler/jqtransformation/reconciler.go b/pkg/sources/flow/reconciler/jqtransformation/reconciler.go new file mode 100644 index 00000000..7a5c8bf9 --- /dev/null +++ b/pkg/sources/flow/reconciler/jqtransformation/reconciler.go @@ -0,0 +1,30 @@ +package jqtransformation + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/jqtransformation" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common 
"github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. +type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.JQTransformation, listersv1alpha1.JQTransformationNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. +func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.JQTransformation) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/sources/flow/reconciler/synchronizer/adapter.go b/pkg/sources/flow/reconciler/synchronizer/adapter.go new file mode 100644 index 00000000..70d55925 --- /dev/null +++ b/pkg/sources/flow/reconciler/synchronizer/adapter.go @@ -0,0 +1,67 @@ +package synchronizer + +import ( + "strconv" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/synchronizer-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. 
+func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.Synchronizer) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(o *v1alpha1.Synchronizer) []corev1.EnvVar { + env := []corev1.EnvVar{ + { + Name: common.EnvBridgeID, + Value: common.GetStatefulBridgeID(o), + }, + { + Name: "CORRELATION_KEY", + Value: o.Spec.CorrelationKey.Attribute, + }, + { + Name: "RESPONSE_WAIT_TIMEOUT", + Value: o.Spec.Response.Timeout.String(), + }, + } + + if o.Spec.CorrelationKey.Length != 0 { + env = append(env, corev1.EnvVar{ + Name: "CORRELATION_KEY_LENGTH", + Value: strconv.Itoa(o.Spec.CorrelationKey.Length), + }) + } + + return env +} diff --git a/pkg/sources/flow/reconciler/synchronizer/controller.go b/pkg/sources/flow/reconciler/synchronizer/controller.go new file mode 100644 index 00000000..e0112be5 --- /dev/null +++ b/pkg/sources/flow/reconciler/synchronizer/controller.go @@ -0,0 +1,52 @@ +package synchronizer + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/synchronizer" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/synchronizer" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, 
+ cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.Synchronizer)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. + adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.Synchronizer]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().Synchronizers, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/sources/flow/reconciler/synchronizer/reconciler.go b/pkg/sources/flow/reconciler/synchronizer/reconciler.go new file mode 100644 index 00000000..64670f3b --- /dev/null +++ b/pkg/sources/flow/reconciler/synchronizer/reconciler.go @@ -0,0 +1,30 @@ +package synchronizer + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/synchronizer" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. +type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.Synchronizer, listersv1alpha1.SynchronizerNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. 
+func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.Synchronizer) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/sources/flow/reconciler/transformation/adapter.go b/pkg/sources/flow/reconciler/transformation/adapter.go new file mode 100644 index 00000000..c935d5f5 --- /dev/null +++ b/pkg/sources/flow/reconciler/transformation/adapter.go @@ -0,0 +1,69 @@ +package transformation + +import ( + "encoding/json" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envTransformationCtx = "TRANSFORMATION_CONTEXT" + envTransformationData = "TRANSFORMATION_DATA" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/transformation-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. 
+func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.Transformation) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(o *v1alpha1.Transformation) []corev1.EnvVar { + var trnContext string + if b, err := json.Marshal(o.Spec.Context); err == nil { + trnContext = string(b) + } + + var trnData string + if b, err := json.Marshal(o.Spec.Data); err == nil { + trnData = string(b) + } + + return []corev1.EnvVar{ + { + Name: envTransformationCtx, + Value: trnContext, + }, + { + Name: envTransformationData, + Value: trnData, + }, + } +} diff --git a/pkg/sources/flow/reconciler/transformation/controller.go b/pkg/sources/flow/reconciler/transformation/controller.go new file mode 100644 index 00000000..7209565c --- /dev/null +++ b/pkg/sources/flow/reconciler/transformation/controller.go @@ -0,0 +1,52 @@ +package transformation + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/transformation" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/transformation" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := 
(*v1alpha1.Transformation)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. + adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.Transformation]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().Transformations, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/sources/flow/reconciler/transformation/reconciler.go b/pkg/sources/flow/reconciler/transformation/reconciler.go new file mode 100644 index 00000000..1efc9fc9 --- /dev/null +++ b/pkg/sources/flow/reconciler/transformation/reconciler.go @@ -0,0 +1,30 @@ +package transformation + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/transformation" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. +type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.Transformation, listersv1alpha1.TransformationNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. 
+func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.Transformation) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/sources/flow/reconciler/xmltojsontransformation/adapter.go b/pkg/sources/flow/reconciler/xmltojsontransformation/adapter.go new file mode 100644 index 00000000..40721858 --- /dev/null +++ b/pkg/sources/flow/reconciler/xmltojsontransformation/adapter.go @@ -0,0 +1,61 @@ +package xmltojsontransformation + +import ( + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envEventsPayloadPolicy = "EVENTS_PAYLOAD_POLICY" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/xmltojsontransformation-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. 
+func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.XMLToJSONTransformation) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(o *v1alpha1.XMLToJSONTransformation) []corev1.EnvVar { + env := []corev1.EnvVar{ + { + Name: common.EnvBridgeID, + Value: common.GetStatefulBridgeID(o), + }, + } + + if o.Spec.EventOptions != nil && o.Spec.EventOptions.PayloadPolicy != nil { + env = append(env, corev1.EnvVar{ + Name: envEventsPayloadPolicy, + Value: string(*o.Spec.EventOptions.PayloadPolicy), + }) + } + + return env +} diff --git a/pkg/sources/flow/reconciler/xmltojsontransformation/controller.go b/pkg/sources/flow/reconciler/xmltojsontransformation/controller.go new file mode 100644 index 00000000..b8ca612c --- /dev/null +++ b/pkg/sources/flow/reconciler/xmltojsontransformation/controller.go @@ -0,0 +1,52 @@ +package xmltojsontransformation + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/xmltojsontransformation" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/xmltojsontransformation" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) 
*controller.Impl { + typ := (*v1alpha1.XMLToJSONTransformation)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. + adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.XMLToJSONTransformation]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().XMLToJSONTransformations, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/sources/flow/reconciler/xmltojsontransformation/reconciler.go b/pkg/sources/flow/reconciler/xmltojsontransformation/reconciler.go new file mode 100644 index 00000000..16db289e --- /dev/null +++ b/pkg/sources/flow/reconciler/xmltojsontransformation/reconciler.go @@ -0,0 +1,30 @@ +package xmltojsontransformation + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/xmltojsontransformation" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. 
+type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.XMLToJSONTransformation, listersv1alpha1.XMLToJSONTransformationNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. +func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.XMLToJSONTransformation) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/sources/flow/reconciler/xslttransformation/adapter.go b/pkg/sources/flow/reconciler/xslttransformation/adapter.go new file mode 100644 index 00000000..ad9e1541 --- /dev/null +++ b/pkg/sources/flow/reconciler/xslttransformation/adapter.go @@ -0,0 +1,65 @@ +package xslttransformation + +import ( + "strconv" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envXSLT = "XSLTTRANSFORMATION_XSLT" + envAllowXSLTOverride = "XSLTTRANSFORMATION_ALLOW_XSLT_OVERRIDE" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + obsConfig source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/xslttransformation-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. 
+var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. +func (r *Reconciler) BuildAdapter(trg commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedTrg := trg.(*v1alpha1.XSLTTransformation) + + return common.NewAdapterKnService(trg, sinkURI, + resource.Image(r.adapterCfg.Image), + resource.EnvVars(MakeAppEnv(typedTrg)...), + resource.EnvVars(r.adapterCfg.obsConfig.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(o *v1alpha1.XSLTTransformation) []corev1.EnvVar { + env := []corev1.EnvVar{ + *o.Spec.XSLT.ToEnvironmentVariable(envXSLT), + { + Name: common.EnvBridgeID, + Value: common.GetStatefulBridgeID(o), + }, + } + + if o.Spec.AllowPerEventXSLT != nil { + env = append(env, corev1.EnvVar{ + Name: envAllowXSLTOverride, + Value: strconv.FormatBool(*o.Spec.AllowPerEventXSLT), + }) + } + + return env +} diff --git a/pkg/sources/flow/reconciler/xslttransformation/controller.go b/pkg/sources/flow/reconciler/xslttransformation/controller.go new file mode 100644 index 00000000..d78cccbf --- /dev/null +++ b/pkg/sources/flow/reconciler/xslttransformation/controller.go @@ -0,0 +1,52 @@ +package xslttransformation + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/flow/v1alpha1/xslttransformation" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/xslttransformation" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController initializes the controller and is called by the generated code +// Registers event handlers to 
enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.XSLTTransformation)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYTARGET_IMAGE. + adapterCfg := &adapterConfig{ + obsConfig: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.XSLTTransformation]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().XSLTTransformations, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/sources/flow/reconciler/xslttransformation/reconciler.go b/pkg/sources/flow/reconciler/xslttransformation/reconciler.go new file mode 100644 index 00000000..128533a9 --- /dev/null +++ b/pkg/sources/flow/reconciler/xslttransformation/reconciler.go @@ -0,0 +1,30 @@ +package xslttransformation + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/flow/v1alpha1/xslttransformation" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/flow/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event target type. 
+type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.XSLTTransformation, listersv1alpha1.XSLTTransformationNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. +func (r *Reconciler) ReconcileKind(ctx context.Context, trg *v1alpha1.XSLTTransformation) reconciler.Event { + // inject target into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, trg) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/sources/reconciler/cloudeventssource/adapter.go b/pkg/sources/reconciler/cloudeventssource/adapter.go new file mode 100644 index 00000000..d08d55be --- /dev/null +++ b/pkg/sources/reconciler/cloudeventssource/adapter.go @@ -0,0 +1,187 @@ +package cloudeventssource + +import ( + "encoding/json" + "fmt" + "path" + "path/filepath" + "strconv" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/adapter/v2" + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" + "github.com/zeiss/typhoon/pkg/sources/cloudevents" +) + +const ( + envCloudEventsPath = "CLOUDEVENTS_PATH" + envCloudEventsBasicAuthCredentials = "CLOUDEVENTS_BASICAUTH_CREDENTIALS" + envCloudEventsRateLimiterRPS = "CLOUDEVENTS_RATELIMITER_RPS" +) + +// adapterConfig contains properties used to configure the source's adapter. +// These are automatically populated by envconfig. 
+type adapterConfig struct { + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/cloudeventssource-adapter"` + // Configuration accessor for logging/metrics/tracing + configs source.ConfigAccessor +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. +func (r *Reconciler) BuildAdapter(src commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) { + typedSrc := src.(*v1alpha1.CloudEventsSource) + + var authVolumes []corev1.Volume + var authVolumeMounts []corev1.VolumeMount + var authEnvs []corev1.EnvVar + + if typedSrc.Spec.Credentials != nil { + // For each BasicAuth credentials a secret is mounted and a tuple + // key/mounted-file pair is added to the environment variable. + kvs := []KeyMountedValue{} + + const ( + secretArrayNamePrefix = "basicauths" + secretBasePath = "/opt" + secretFileName = "cesource" + ) + + for i, ba := range typedSrc.Spec.Credentials.BasicAuths { + if ba.Password.ValueFromSecret != nil { + secretName := fmt.Sprintf("%s%d", secretArrayNamePrefix, i) + secretPath := filepath.Join(secretBasePath, secretName) + + v, vm := secretVolumeAndMountAtPath( + secretName, + secretPath, + secretFileName, + ba.Password.ValueFromSecret.Name, + ba.Password.ValueFromSecret.Key, + ) + authVolumes = append(authVolumes, v) + authVolumeMounts = append(authVolumeMounts, vm) + + kvs = append(kvs, KeyMountedValue{ + Key: ba.Username, + MountedValueFile: path.Join(secretPath, secretFileName), + }) + } + } + + if len(kvs) > 0 { + s, err := json.Marshal(kvs) + if err != nil { + return nil, fmt.Errorf("serializing keyMountedValues to JSON: %w", err) + } + + authEnvs = append(authEnvs, corev1.EnvVar{ + Name: envCloudEventsBasicAuthCredentials, + Value: string(s), + }) + } + } + + ceOverridesStr := cloudevents.OverridesJSON(typedSrc.Spec.CloudEventOverrides) + + // Common reconciler internals set the 
visibility to non public by default. That does + // not play well with sources which should default to being public if no visibility + // configuration is provided. + switch { + case typedSrc.Spec.AdapterOverrides == nil: + t := true + typedSrc.Spec.AdapterOverrides = &commonv1alpha1.AdapterOverrides{ + Public: &t, + } + case typedSrc.Spec.AdapterOverrides.Public == nil: + t := true + typedSrc.Spec.AdapterOverrides.Public = &t + } + + return common.NewAdapterKnService(src, sinkURI, + resource.Image(r.adapterCfg.Image), + + resource.Volumes(authVolumes...), + resource.VolumeMounts(authVolumeMounts...), + resource.EnvVars(authEnvs...), + + resource.EnvVars(MakeAppEnv(typedSrc)...), + resource.EnvVar(adapter.EnvConfigCEOverrides, ceOverridesStr), + resource.EnvVars(r.adapterCfg.configs.ToEnvVars()...), + ), nil +} + +type KeyMountedValue struct { + Key string + MountedValueFile string +} + +func (kmv *KeyMountedValue) Decode(value string) error { + if err := json.Unmarshal([]byte(value), kmv); err != nil { + return err + } + return nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(o *v1alpha1.CloudEventsSource) []corev1.EnvVar { + envs := []corev1.EnvVar{ + { + Name: common.EnvBridgeID, + Value: common.GetStatefulBridgeID(o), + }, + } + + if o.Spec.Path != nil { + envs = append(envs, corev1.EnvVar{ + Name: envCloudEventsPath, + Value: *o.Spec.Path, + }) + } + + if o.Spec.RateLimiter != nil { + envs = append(envs, corev1.EnvVar{ + Name: envCloudEventsRateLimiterRPS, + Value: strconv.Itoa(o.Spec.RateLimiter.RequestsPerSecond), + }) + } + + return envs +} + +// secretVolumeAndMountAtPath returns a Secret-based volume and corresponding +// mount at the given path. 
+func secretVolumeAndMountAtPath(name, mountPath, mountFile, secretName, secretKey string) (corev1.Volume, corev1.VolumeMount) { + v := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + Items: []corev1.KeyToPath{{ + Key: secretKey, + Path: mountFile, + }}, + }, + }, + } + + vm := corev1.VolumeMount{ + Name: name, + ReadOnly: true, + MountPath: mountPath, + } + + return v, vm +} diff --git a/pkg/sources/reconciler/cloudeventssource/controller.go b/pkg/sources/reconciler/cloudeventssource/controller.go new file mode 100644 index 00000000..81132ecb --- /dev/null +++ b/pkg/sources/reconciler/cloudeventssource/controller.go @@ -0,0 +1,51 @@ +package cloudeventssource + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/sources/v1alpha1/cloudeventssource" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/sources/v1alpha1/cloudeventssource" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController creates a Reconciler for the event source and returns the result of NewImpl. +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.CloudEventsSource)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYSOURCE_IMAGE. 
+ adapterCfg := &adapterConfig{ + configs: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericServiceReconciler[*v1alpha1.CloudEventsSource]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().CloudEventsSources, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/sources/reconciler/cloudeventssource/reconciler.go b/pkg/sources/reconciler/cloudeventssource/reconciler.go new file mode 100644 index 00000000..6d86944c --- /dev/null +++ b/pkg/sources/reconciler/cloudeventssource/reconciler.go @@ -0,0 +1,29 @@ +package cloudeventssource + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/sources/v1alpha1/cloudeventssource" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/sources/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event source type. +type Reconciler struct { + base common.GenericServiceReconciler[*v1alpha1.CloudEventsSource, listersv1alpha1.CloudEventsSourceNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. 
+func (r *Reconciler) ReconcileKind(ctx context.Context, o *v1alpha1.CloudEventsSource) reconciler.Event { + // inject source into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, o) + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/sources/reconciler/httppollersource/adapter.go b/pkg/sources/reconciler/httppollersource/adapter.go new file mode 100644 index 00000000..4be7bb2a --- /dev/null +++ b/pkg/sources/reconciler/httppollersource/adapter.go @@ -0,0 +1,117 @@ +package httppollersource + +import ( + "sort" + "strconv" + "strings" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envHTTPPollerEventType = "HTTPPOLLER_EVENT_TYPE" + envHTTPPollerEventSource = "HTTPPOLLER_EVENT_SOURCE" + envHTTPPollerEndpoint = "HTTPPOLLER_ENDPOINT" + envHTTPPollerMethod = "HTTPPOLLER_METHOD" + envHTTPPollerSkipVerify = "HTTPPOLLER_SKIP_VERIFY" + envHTTPPollerCACertificate = "HTTPPOLLER_CA_CERTIFICATE" + envHTTPPollerBasicAuthUsername = "HTTPPOLLER_BASICAUTH_USERNAME" + envHTTPPollerBasicAuthPassword = "HTTPPOLLER_BASICAUTH_PASSWORD" + envHTTPPollerHeaders = "HTTPPOLLER_HEADERS" + envHTTPPollerInterval = "HTTPPOLLER_INTERVAL" +) + +// adapterConfig contains properties used to configure the source's adapter. +// These are automatically populated by envconfig. +type adapterConfig struct { + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/httppollersource-adapter"` + + // Configuration accessor for logging/metrics/tracing + configs source.ConfigAccessor +} + +// BuildAdapter implements common.AdapterBuilder. 
+func (r *Reconciler) BuildAdapter(src commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*appsv1.Deployment, error) { + typedSrc := src.(*v1alpha1.HTTPPollerSource) + + return common.NewAdapterDeployment(src, sinkURI, + resource.Image(r.adapterCfg.Image), + + resource.EnvVars(MakeAppEnv(typedSrc)...), + resource.EnvVars(r.adapterCfg.configs.ToEnvVars()...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. +func MakeAppEnv(src *v1alpha1.HTTPPollerSource) []corev1.EnvVar { + skipVerify := false + if src.Spec.SkipVerify != nil { + skipVerify = *src.Spec.SkipVerify + } + + envs := []corev1.EnvVar{{ + Name: envHTTPPollerEventType, + Value: src.Spec.EventType, + }, { + Name: envHTTPPollerEventSource, + Value: src.AsEventSource(), + }, { + Name: envHTTPPollerEndpoint, + Value: src.Spec.Endpoint.String(), + }, { + Name: envHTTPPollerMethod, + Value: src.Spec.Method, + }, { + Name: envHTTPPollerSkipVerify, + Value: strconv.FormatBool(skipVerify), + }, { + Name: envHTTPPollerInterval, + Value: src.Spec.Interval.String(), + }} + + if src.Spec.Headers != nil { + headers := make([]string, 0, len(src.Spec.Headers)) + for k, v := range src.Spec.Headers { + headers = append(headers, k+":"+v) + } + sort.Strings(headers) + + envs = append(envs, corev1.EnvVar{ + Name: envHTTPPollerHeaders, + Value: strings.Join(headers, ","), + }) + } + + if user := src.Spec.BasicAuthUsername; user != nil { + envs = append(envs, corev1.EnvVar{ + Name: envHTTPPollerBasicAuthUsername, + Value: *user, + }) + } + + if passw := src.Spec.BasicAuthPassword; passw != nil { + envs = common.MaybeAppendValueFromEnvVar(envs, + envHTTPPollerBasicAuthPassword, *passw, + ) + } + + if src.Spec.CACertificate != nil { + envs = append(envs, corev1.EnvVar{ + Name: envHTTPPollerCACertificate, + Value: *src.Spec.CACertificate, + }) + } + + return envs +} diff --git a/pkg/sources/reconciler/httppollersource/controller.go 
b/pkg/sources/reconciler/httppollersource/controller.go new file mode 100644 index 00000000..59473d7e --- /dev/null +++ b/pkg/sources/reconciler/httppollersource/controller.go @@ -0,0 +1,49 @@ +package httppollersource + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/sources/v1alpha1/httppollersource" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/sources/v1alpha1/httppollersource" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// NewController creates a Reconciler for the event source and returns the result of NewImpl. +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.HTTPPollerSource)(nil) + app := common.ComponentName(typ) + + adapterCfg := &adapterConfig{ + configs: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericDeploymentReconciler[*v1alpha1.HTTPPollerSource]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().HTTPPollerSources, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/sources/reconciler/httppollersource/reconciler.go b/pkg/sources/reconciler/httppollersource/reconciler.go new file mode 100644 index 00000000..a2badf9a --- /dev/null +++ b/pkg/sources/reconciler/httppollersource/reconciler.go @@ -0,0 +1,30 @@ +package httppollersource + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 
"github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/sources/v1alpha1/httppollersource" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/sources/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event source type. +type Reconciler struct { + base common.GenericDeploymentReconciler[*v1alpha1.HTTPPollerSource, listersv1alpha1.HTTPPollerSourceNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. +func (r *Reconciler) ReconcileKind(ctx context.Context, src *v1alpha1.HTTPPollerSource) reconciler.Event { + // inject source into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, src) + + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/sources/reconciler/kafkasource/adapter.go b/pkg/sources/reconciler/kafkasource/adapter.go new file mode 100644 index 00000000..c9fe1d27 --- /dev/null +++ b/pkg/sources/reconciler/kafkasource/adapter.go @@ -0,0 +1,248 @@ +package kafkasource + +import ( + "path/filepath" + "strconv" + "strings" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envBootstrapServers = "BOOTSTRAP_SERVERS" + envTopic = "TOPIC" + envGroupID = "GROUP_ID" + envUsername = "USERNAME" + envPassword = "PASSWORD" + envSecurityMechanisms = "SECURITY_MECHANISMS" + envCA = "CA" + envClientCert = "CLIENT_CERT" + 
envClientKey = "CLIENT_KEY" + envSkipVerify = "SKIP_VERIFY" + + envSaslEnable = "SASL_ENABLE" + envTLSEnable = "TLS_ENABLE" + + envKerberosConfigPath = "KERBEROS_CONFIG_PATH" + envKerberosKeytabPath = "KERBEROS_KEYTAB_PATH" + envKerberosServiceName = "KERBEROS_SERVICE_NAME" + envKerberosRealm = "KERBEROS_REALM" + envKerberosUsername = "KERBEROS_USERNAME" + envKerberosPassword = "KERBEROS_PASSWORD" + + krb5ConfPath = "/etc/krb5.conf" + krb5KeytabPath = "/etc/krb5.keytab" +) + +// adapterConfig contains properties used to configure the target's adapter. +// Public fields are automatically populated by envconfig. +type adapterConfig struct { + // Configuration accessor for logging/metrics/tracing + configs source.ConfigAccessor + // Container image + Image string `default:"ghcr.io/zeiss/typhoon/kafkasource-adapter"` +} + +// Verify that Reconciler implements common.AdapterBuilder. +var _ common.AdapterBuilder[*appsv1.Deployment] = (*Reconciler)(nil) + +// BuildAdapter implements common.AdapterBuilder. 
+func (r *Reconciler) BuildAdapter(src commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*appsv1.Deployment, error) { + typedSrc := src.(*v1alpha1.KafkaSource) + + var secretVolumes []corev1.Volume + var secretVolMounts []corev1.VolumeMount + + if typedSrc.Spec.Auth.Kerberos != nil { + if typedSrc.Spec.Auth.Kerberos.Config != nil { + configVol, configVolMount := secretVolumeAndMountAtPath( + "krb5-config", + krb5ConfPath, + typedSrc.Spec.Auth.Kerberos.Config.ValueFromSecret.Name, + typedSrc.Spec.Auth.Kerberos.Config.ValueFromSecret.Key, + ) + secretVolumes = append(secretVolumes, configVol) + secretVolMounts = append(secretVolMounts, configVolMount) + } + + if typedSrc.Spec.Auth.Kerberos.Keytab != nil { + keytabVol, keytabVolMount := secretVolumeAndMountAtPath( + "krb5-keytab", + krb5KeytabPath, + typedSrc.Spec.Auth.Kerberos.Keytab.ValueFromSecret.Name, + typedSrc.Spec.Auth.Kerberos.Keytab.ValueFromSecret.Key, + ) + secretVolumes = append(secretVolumes, keytabVol) + secretVolMounts = append(secretVolMounts, keytabVolMount) + } + } + + return common.NewAdapterDeployment(src, sinkURI, + resource.Image(r.adapterCfg.Image), + + resource.EnvVars(MakeAppEnv(typedSrc)...), + resource.EnvVars(r.adapterCfg.configs.ToEnvVars()...), + + resource.Volumes(secretVolumes...), + resource.VolumeMounts(secretVolMounts...), + ), nil +} + +// MakeAppEnv extracts environment variables from the object. +// Exported to be used in external tools for local test environments. 
+func MakeAppEnv(o *v1alpha1.KafkaSource) []corev1.EnvVar { + envs := []corev1.EnvVar{ + { + Name: envBootstrapServers, + Value: strings.Join(o.Spec.BootstrapServers, ","), + }, + { + Name: envTopic, + Value: o.Spec.Topic, + }, + { + Name: envSaslEnable, + Value: strconv.FormatBool(o.Spec.Auth.SASLEnable), + }, + { + Name: envGroupID, + Value: o.Spec.GroupID, + }, + } + + if o.Spec.Auth.TLSEnable != nil { + envs = append(envs, corev1.EnvVar{ + Name: envTLSEnable, + Value: strconv.FormatBool(*o.Spec.Auth.TLSEnable), + }) + } + + if o.Spec.Auth.SecurityMechanisms != nil { + envs = append(envs, corev1.EnvVar{ + Name: envSecurityMechanisms, + Value: *o.Spec.Auth.SecurityMechanisms, + }) + } + + if o.Spec.Auth.Username != nil { + envs = append(envs, corev1.EnvVar{ + Name: envUsername, + Value: *o.Spec.Auth.Username, + }) + } + + if o.Spec.Auth.Password != nil { + envs = common.MaybeAppendValueFromEnvVar( + envs, envPassword, *o.Spec.Auth.Password, + ) + } + + if o.Spec.Auth.TLS != nil { + if o.Spec.Auth.TLS.CA != nil { + envs = common.MaybeAppendValueFromEnvVar( + envs, envCA, *o.Spec.Auth.TLS.CA, + ) + } + + if o.Spec.Auth.TLS.ClientCert != nil { + envs = common.MaybeAppendValueFromEnvVar( + envs, envClientCert, *o.Spec.Auth.TLS.ClientCert, + ) + } + + if o.Spec.Auth.TLS.ClientKey != nil { + envs = common.MaybeAppendValueFromEnvVar( + envs, envClientKey, *o.Spec.Auth.TLS.ClientKey, + ) + } + + if o.Spec.Auth.TLS.SkipVerify != nil { + envs = append(envs, corev1.EnvVar{ + Name: envSkipVerify, + Value: strconv.FormatBool(*o.Spec.Auth.TLS.SkipVerify), + }) + } + } + + if o.Spec.Auth.Kerberos != nil { + if o.Spec.Auth.Kerberos.Config != nil { + envs = append(envs, corev1.EnvVar{ + Name: envKerberosConfigPath, + Value: krb5ConfPath, + }) + } + + if o.Spec.Auth.Kerberos.Keytab != nil { + envs = append(envs, corev1.EnvVar{ + Name: envKerberosKeytabPath, + Value: krb5KeytabPath, + }) + } + + if o.Spec.Auth.Kerberos.ServiceName != nil { + envs = append(envs, corev1.EnvVar{ + 
Name: envKerberosServiceName, + Value: *o.Spec.Auth.Kerberos.ServiceName, + }) + } + + if o.Spec.Auth.Kerberos.Realm != nil { + envs = append(envs, corev1.EnvVar{ + Name: envKerberosRealm, + Value: *o.Spec.Auth.Kerberos.Realm, + }) + } + + if o.Spec.Auth.Kerberos.Username != nil { + envs = append(envs, corev1.EnvVar{ + Name: envKerberosUsername, + Value: *o.Spec.Auth.Kerberos.Username, + }) + } + + if o.Spec.Auth.Kerberos.Password != nil { + envs = common.MaybeAppendValueFromEnvVar( + envs, envKerberosPassword, *o.Spec.Auth.Kerberos.Password, + ) + } + } + + return envs +} + +// secretVolumeAndMountAtPath returns a Secret-based volume and corresponding +// mount at the given path. +func secretVolumeAndMountAtPath(name, mountPath, secretName, secretKey string) (corev1.Volume, corev1.VolumeMount) { + v := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + Items: []corev1.KeyToPath{ + { + Key: secretKey, + Path: filepath.Base(mountPath), + }, + }, + }, + }, + } + + vm := corev1.VolumeMount{ + Name: name, + ReadOnly: true, + MountPath: mountPath, + SubPath: filepath.Base(mountPath), + } + + return v, vm +} diff --git a/pkg/sources/reconciler/kafkasource/controller.go b/pkg/sources/reconciler/kafkasource/controller.go new file mode 100644 index 00000000..3c6c912f --- /dev/null +++ b/pkg/sources/reconciler/kafkasource/controller.go @@ -0,0 +1,51 @@ +package kafkasource + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/sources/v1alpha1/kafkasource" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/sources/v1alpha1/kafkasource" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// 
NewController creates a Reconciler for the event source and returns the result of NewImpl. +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + typ := (*v1alpha1.KafkaSource)(nil) + app := common.ComponentName(typ) + + // Calling envconfig.Process() with a prefix appends that prefix + // (uppercased) to the Go field name, e.g. MYSOURCE_IMAGE. + adapterCfg := &adapterConfig{ + configs: source.WatchConfigurations(ctx, app, cmw), + } + envconfig.MustProcess(app, adapterCfg) + + informer := informerv1alpha1.Get(ctx) + + r := &Reconciler{ + adapterCfg: adapterCfg, + } + impl := reconcilerv1alpha1.NewImpl(ctx, r) + + r.base = common.NewGenericDeploymentReconciler[*v1alpha1.KafkaSource]( + ctx, + typ.GetGroupVersionKind(), + impl.Tracker, + impl.EnqueueControllerOf, + informer.Lister().KafkaSources, + ) + + informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/pkg/sources/reconciler/kafkasource/reconciler.go b/pkg/sources/reconciler/kafkasource/reconciler.go new file mode 100644 index 00000000..37e5fc86 --- /dev/null +++ b/pkg/sources/reconciler/kafkasource/reconciler.go @@ -0,0 +1,29 @@ +package kafkasource + +import ( + "context" + + "knative.dev/pkg/reconciler" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/sources/v1alpha1/kafkasource" + listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/sources/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for the event source type. 
+type Reconciler struct { + base common.GenericDeploymentReconciler[*v1alpha1.KafkaSource, listersv1alpha1.KafkaSourceNamespaceLister] + adapterCfg *adapterConfig +} + +// Check that our Reconciler implements Interface +var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. +func (r *Reconciler) ReconcileKind(ctx context.Context, src *v1alpha1.KafkaSource) reconciler.Event { + // inject source into context for usage in reconciliation logic + ctx = commonv1alpha1.WithReconcilable(ctx, src) + return r.base.ReconcileAdapter(ctx, r) +} diff --git a/pkg/sources/reconciler/webhooksource/adapter.go b/pkg/sources/reconciler/webhooksource/adapter.go new file mode 100644 index 00000000..0201febb --- /dev/null +++ b/pkg/sources/reconciler/webhooksource/adapter.go @@ -0,0 +1,107 @@ +package webhooksource + +import ( + "strings" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/eventing/pkg/reconciler/source" + "knative.dev/pkg/apis" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + + commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1" + "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" + common "github.com/zeiss/typhoon/pkg/reconciler" + "github.com/zeiss/typhoon/pkg/reconciler/resource" +) + +const ( + envWebhookEventType = "WEBHOOK_EVENT_TYPE" + envWebhookEventSource = "WEBHOOK_EVENT_SOURCE" + envWebhookEventExtensionAttributesFrom = "WEBHOOK_EVENT_EXTENSION_ATTRIBUTES_FROM" + envWebhookBasicAuthUsername = "WEBHOOK_BASICAUTH_USERNAME" + envWebhookBasicAuthPassword = "WEBHOOK_BASICAUTH_PASSWORD" + envCorsAllowOrigin = "WEBHOOK_CORS_ALLOW_ORIGIN" +) + +// adapterConfig contains properties used to configure the adapter. +// These are automatically populated by envconfig. 
+type adapterConfig struct {
+	// Container image
+	Image string `default:"ghcr.io/zeiss/typhoon/webhook-adapter"`
+
+	// Configuration accessor for logging/metrics/tracing
+	configs source.ConfigAccessor
+}
+
+// Verify that Reconciler implements common.AdapterBuilder.
+var _ common.AdapterBuilder[*servingv1.Service] = (*Reconciler)(nil)
+
+// BuildAdapter implements common.AdapterBuilder. It renders the Knative
+// Service that runs the webhook adapter for the given source.
+func (r *Reconciler) BuildAdapter(src commonv1alpha1.Reconcilable, sinkURI *apis.URL) (*servingv1.Service, error) {
+	typedSrc := src.(*v1alpha1.WebhookSource)
+
+	// Common reconciler internals set the visibility to non public by default. That does
+	// not play well with sources which should default to being public if no visibility
+	// configuration is provided.
+	// NOTE(review): this mutates the source's Spec in place rather than a
+	// copy — confirm that is acceptable for the shared reconciler machinery.
+	switch {
+	case typedSrc.Spec.AdapterOverrides == nil:
+		t := true
+		typedSrc.Spec.AdapterOverrides = &commonv1alpha1.AdapterOverrides{
+			Public: &t,
+		}
+	case typedSrc.Spec.AdapterOverrides.Public == nil:
+		t := true
+		typedSrc.Spec.AdapterOverrides.Public = &t
+	}
+
+	return common.NewAdapterKnService(src, sinkURI,
+		resource.Image(r.adapterCfg.Image),
+
+		resource.EnvVars(MakeAppEnv(typedSrc)...),
+		resource.EnvVars(r.adapterCfg.configs.ToEnvVars()...),
+	), nil
+}
+
+// MakeAppEnv extracts environment variables from the object.
+// Exported to be used in external tools for local test environments.
+func MakeAppEnv(src *v1alpha1.WebhookSource) []corev1.EnvVar {
+	// Event type and source are always set; the remaining variables are
+	// appended only when the corresponding optional Spec field is present.
+	envs := []corev1.EnvVar{{
+		Name:  envWebhookEventType,
+		Value: src.Spec.EventType,
+	}, {
+		Name:  envWebhookEventSource,
+		Value: src.AsEventSource(),
+	}}
+
+	if extAttributes := src.Spec.EventExtensionAttributes; extAttributes != nil {
+		if len(extAttributes.From) != 0 {
+			envs = append(envs, corev1.EnvVar{
+				Name:  envWebhookEventExtensionAttributesFrom,
+				Value: strings.Join(extAttributes.From, ","),
+			})
+		}
+	}
+
+	if origin := src.Spec.CORSAllowOrigin; origin != nil {
+		envs = append(envs, corev1.EnvVar{
+			Name:  envCorsAllowOrigin,
+			Value: *origin,
+		})
+	}
+
+	if user := src.Spec.BasicAuthUsername; user != nil {
+		envs = append(envs, corev1.EnvVar{
+			Name:  envWebhookBasicAuthUsername,
+			Value: *user,
+		})
+	}
+
+	// The password goes through MaybeAppendValueFromEnvVar, which handles
+	// plain values as well as valueFrom-style Secret references.
+	if passw := src.Spec.BasicAuthPassword; passw != nil {
+		envs = common.MaybeAppendValueFromEnvVar(envs,
+			envWebhookBasicAuthPassword, *passw,
+		)
+	}
+
+	return envs
+}
diff --git a/pkg/sources/reconciler/webhooksource/controller.go b/pkg/sources/reconciler/webhooksource/controller.go
new file mode 100644
index 00000000..d6afcbe1
--- /dev/null
+++ b/pkg/sources/reconciler/webhooksource/controller.go
@@ -0,0 +1,50 @@
+package webhooksource
+
+import (
+	"context"
+
+	"github.com/kelseyhightower/envconfig"
+
+	"knative.dev/eventing/pkg/reconciler/source"
+	"knative.dev/pkg/configmap"
+	"knative.dev/pkg/controller"
+
+	"github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1"
+	informerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/sources/v1alpha1/webhooksource"
+	reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/sources/v1alpha1/webhooksource"
+	common "github.com/zeiss/typhoon/pkg/reconciler"
+)
+
+// NewController initializes the controller and is called by the generated code
+// Registers event handlers to enqueue events
+func NewController(
+	ctx context.Context,
+	cmw configmap.Watcher,
+) *controller.Impl {
+	// A typed nil pointer is sufficient to derive the component name and GVK.
+	typ := (*v1alpha1.WebhookSource)(nil)
+	app := common.ComponentName(typ)
+
+	adapterCfg := &adapterConfig{
+		configs: source.WatchConfigurations(ctx, app, cmw),
+	}
+	envconfig.MustProcess(app, adapterCfg)
+
+	informer := informerv1alpha1.Get(ctx)
+
+	r := &Reconciler{
+		adapterCfg: adapterCfg,
+	}
+	impl := reconcilerv1alpha1.NewImpl(ctx, r)
+
+	// Unlike the kafkasource controller, this source's adapter runs as a
+	// Knative Service, hence the generic Service reconciler.
+	r.base = common.NewGenericServiceReconciler[*v1alpha1.WebhookSource](
+		ctx,
+		typ.GetGroupVersionKind(),
+		impl.Tracker,
+		impl.EnqueueControllerOf,
+		informer.Lister().WebhookSources,
+	)
+
+	informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))
+
+	return impl
+}
diff --git a/pkg/sources/reconciler/webhooksource/reconciler.go b/pkg/sources/reconciler/webhooksource/reconciler.go
new file mode 100644
index 00000000..0148fed4
--- /dev/null
+++ b/pkg/sources/reconciler/webhooksource/reconciler.go
@@ -0,0 +1,30 @@
+package webhooksource
+
+import (
+	"context"
+
+	"knative.dev/pkg/reconciler"
+
+	commonv1alpha1 "github.com/zeiss/typhoon/pkg/apis/common/v1alpha1"
+	"github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1"
+	reconcilerv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/sources/v1alpha1/webhooksource"
+	listersv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/sources/v1alpha1"
+	common "github.com/zeiss/typhoon/pkg/reconciler"
+)
+
+// Reconciler implements controller.Reconciler for the event source type.
+type Reconciler struct {
+	// base provides the shared adapter-Service reconciliation logic.
+	base       common.GenericServiceReconciler[*v1alpha1.WebhookSource, listersv1alpha1.WebhookSourceNamespaceLister]
+	// adapterCfg holds the adapter image and observability configuration.
+	adapterCfg *adapterConfig
+}
+
+// Check that our Reconciler implements Interface
+var _ reconcilerv1alpha1.Interface = (*Reconciler)(nil)
+
+// ReconcileKind implements Interface.ReconcileKind.
+func (r *Reconciler) ReconcileKind(ctx context.Context, src *v1alpha1.WebhookSource) reconciler.Event {
+	// inject source into context for usage in reconciliation logic
+	ctx = commonv1alpha1.WithReconcilable(ctx, src)
+
+	return r.base.ReconcileAdapter(ctx, r)
+}
diff --git a/pkg/sources/secret/secret.go b/pkg/sources/secret/secret.go
new file mode 100644
index 00000000..9e8fd978
--- /dev/null
+++ b/pkg/sources/secret/secret.go
@@ -0,0 +1,85 @@
+// Package secret contains utilities for consuming secret values from various
+// data sources.
+package secret
+
+import (
+	"context"
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+	"github.com/zeiss/typhoon/pkg/apis/common/v1alpha1"
+)
+
+// Secrets is a list of secret values.
+type Secrets []string
+
+// Getter can obtain secrets.
+type Getter interface {
+	// Get returns exactly one secret value per input.
+	Get(...v1alpha1.ValueFromField) (Secrets, error)
+}
+
+// NewGetter returns a Getter for the given namespaced Secret client interface.
+func NewGetter(cli coreclientv1.SecretInterface) *GetterWithClientset {
+	return &GetterWithClientset{
+		cli: cli,
+	}
+}
+
+// GetterWithClientset gets Kubernetes secrets using a namespaced Secret client
+// interface.
+type GetterWithClientset struct {
+	cli coreclientv1.SecretInterface
+}
+
+// GetterWithClientset implements Getter.
+var _ Getter = (*GetterWithClientset)(nil)
+
+// Get implements Getter. Each input resolves to its literal value, or, when it
+// references a Secret key, to that key's value read from the cluster.
+// NOTE(review): lookups use context.Background(); consider threading a caller
+// context through the Getter interface in a future revision.
+func (g *GetterWithClientset) Get(refs ...v1alpha1.ValueFromField) (Secrets, error) {
+	var s Secrets
+
+	// cache Secret objects by name between iterations to avoid multiple
+	// round trips to the Kubernetes API for the same Secret object.
+	secretCache := make(map[string]*corev1.Secret)
+
+	for _, ref := range refs {
+		val := ref.Value
+
+		if vfs := ref.ValueFromSecret; vfs != nil {
+			// Single comma-ok lookup; the previous "secretCache != nil"
+			// guard was dead code (the map is always initialized above)
+			// and the hit path performed a second, redundant map access.
+			secr, ok := secretCache[vfs.Name]
+			if !ok {
+				var err error
+				secr, err = g.cli.Get(context.Background(), vfs.Name, metav1.GetOptions{})
+				if err != nil {
+					return nil, fmt.Errorf("getting Secret from cluster: %w", err)
+				}
+
+				secretCache[vfs.Name] = secr
+			}
+
+			// A missing key yields the empty string, per map semantics on
+			// the Secret's Data field.
+			val = string(secr.Data[vfs.Key])
+		}
+
+		s = append(s, val)
+	}
+
+	return s, nil
+}
+
+// GetterFunc allows the use of ordinary functions as Getter.
+type GetterFunc func(...v1alpha1.ValueFromField) (Secrets, error)
+
+// GetterFunc implements Getter.
+var _ Getter = (GetterFunc)(nil)
+
+// Get implements Getter.
+func (f GetterFunc) Get(refs ...v1alpha1.ValueFromField) (Secrets, error) {
+	return f(refs...)
+}