Skip to content

Commit 9230eba

Browse files
backend: serviceproxy: helm: server: Add cluster service proxy, helm user verification (#3532)
This commit introduces the in-cluster service proxy feature, tightens auth/verification for Helm installs, and simplifies backend token handling. - Add new serviceproxy package (handler, service, connection, http) to proxy requests to in-cluster Services - Implements ServiceConnection, HTTP GET helper, service discovery and URL prefix generation - Adds comprehensive unit tests for handler, service, connection and http helpers - Registers route: /clusters/{clusterName}/serviceproxy/{namespace}/{name}?request={request} - Disables response caching for proxied responses and supports auth token from cookie or Authorization header - Propagate auth tokens into cluster requests: - Pull token from cookie into Authorization header when missing - Set context.AuthInfo.Token when Authorization bearer token present (used by Helm handlers) - Helm changes: - Add VerifyUser() to perform a whoami-style check (SelfSubjectReview) to ensure minimal privileges before installs - Integrate VerifyUser into install flow and clean up installRelease (streamline error handling) - Add tests for VerifyUser - Misc: - Add tests and small fixes in headlamp server tests to exercise new service proxy behavior Co-authored-by: Murali Annamneni <[email protected]> Co-authored-by: vrushah <[email protected]>
1 parent 8ca3822 commit 9230eba

File tree

17 files changed

+1503
-39
lines changed

17 files changed

+1503
-39
lines changed

backend/cmd/headlamp.go

Lines changed: 43 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@ import (
4545
auth "github.com/kubernetes-sigs/headlamp/backend/pkg/auth"
4646
"github.com/kubernetes-sigs/headlamp/backend/pkg/cache"
4747
cfg "github.com/kubernetes-sigs/headlamp/backend/pkg/config"
48+
"github.com/kubernetes-sigs/headlamp/backend/pkg/serviceproxy"
4849

4950
headlampcfg "github.com/kubernetes-sigs/headlamp/backend/pkg/headlampconfig"
5051
"github.com/kubernetes-sigs/headlamp/backend/pkg/helm"
@@ -293,7 +294,7 @@ func addPluginDeleteRoute(config *HeadlampConfig, r *mux.Router) {
293294

294295
logger.Log(logger.LevelInfo, nil, nil, "Received DELETE request for plugin: "+mux.Vars(r)["name"])
295296

296-
if err := checkHeadlampBackendToken(w, r); err != nil {
297+
if err := config.checkHeadlampBackendToken(w, r); err != nil {
297298
config.telemetryHandler.RecordError(span, err, " Invalid backend token")
298299
logger.Log(logger.LevelWarn, nil, err, "Invalid backend token for DELETE /plugins/{name}")
299300
return
@@ -623,8 +624,11 @@ func createHeadlampHandler(config *HeadlampConfig) http.Handler {
623624

624625
oidcAuthConfig, err := kContext.OidcConfig()
625626
if err != nil {
626-
logger.Log(logger.LevelError, map[string]string{"cluster": cluster},
627-
err, "failed to get oidc config")
627+
// Avoid the noise in the pod log while accessing Headlamp using a Service Token
628+
if config.oidcIdpIssuerURL != "" {
629+
logger.Log(logger.LevelError, map[string]string{"cluster": cluster},
630+
err, "failed to get oidc config")
631+
}
628632

629633
http.Error(w, err.Error(), http.StatusInternalServerError)
630634
return
@@ -1087,6 +1091,23 @@ func getHelmHandler(c *HeadlampConfig, w http.ResponseWriter, r *http.Request) (
10871091
return nil, errors.New("not found")
10881092
}
10891093

1094+
tokenFromCookie, err := auth.GetTokenFromCookie(r, clusterName)
1095+
1096+
bearerToken := r.Header.Get("Authorization")
1097+
if err == nil && tokenFromCookie != "" && bearerToken == "" {
1098+
r.Header.Set("Authorization", fmt.Sprintf("Bearer %s", tokenFromCookie))
1099+
}
1100+
1101+
// If the request contains a bearer token in the Authorization header, set it in AuthInfo.
1102+
// This token will be used for authentication to the Kubernetes cluster.
1103+
bearerToken = r.Header.Get("Authorization")
1104+
if bearerToken != "" {
1105+
reqToken := strings.TrimPrefix(bearerToken, "Bearer ")
1106+
if reqToken != "" {
1107+
context.AuthInfo.Token = reqToken
1108+
}
1109+
}
1110+
10901111
namespace := r.URL.Query().Get("namespace")
10911112

10921113
helmHandler, err := helm.NewHandler(context.ClientConfig(), c.cache, namespace)
@@ -1110,7 +1131,11 @@ func getHelmHandler(c *HeadlampConfig, w http.ResponseWriter, r *http.Request) (
11101131
// Check request for header "X-HEADLAMP_BACKEND-TOKEN" matches HEADLAMP_BACKEND_TOKEN env
11111132
// This check is to prevent access except for from the app.
11121133
// The app sets HEADLAMP_BACKEND_TOKEN, and gives the token to the frontend.
1113-
func checkHeadlampBackendToken(w http.ResponseWriter, r *http.Request) error {
1134+
func (c *HeadlampConfig) checkHeadlampBackendToken(w http.ResponseWriter, r *http.Request) error {
1135+
if c.UseInCluster {
1136+
return nil
1137+
}
1138+
11141139
backendToken := r.Header.Get("X-HEADLAMP_BACKEND-TOKEN")
11151140
backendTokenEnv := os.Getenv("HEADLAMP_BACKEND_TOKEN")
11161141

@@ -1122,6 +1147,16 @@ func checkHeadlampBackendToken(w http.ResponseWriter, r *http.Request) error {
11221147
return nil
11231148
}
11241149

1150+
// handleClusterServiceProxy registers a new route for the path serviceproxy/{namespace}/{name}
1151+
// to proxy requests to in-cluster services.
1152+
func handleClusterServiceProxy(c *HeadlampConfig, router *mux.Router) {
1153+
router.HandleFunc("/clusters/{clusterName}/serviceproxy/{namespace}/{name}",
1154+
func(w http.ResponseWriter, r *http.Request) {
1155+
serviceproxy.RequestHandler(c.KubeConfigStore, w, r)
1156+
}).Queries("request", "{request}").
1157+
Methods("GET")
1158+
}
1159+
11251160
//nolint:funlen
11261161
func handleClusterHelm(c *HeadlampConfig, router *mux.Router) {
11271162
router.PathPrefix("/clusters/{clusterName}/helm/{.*}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -1137,7 +1172,7 @@ func handleClusterHelm(c *HeadlampConfig, router *mux.Router) {
11371172

11381173
c.telemetryHandler.RecordRequestCount(ctx, r, attribute.String("cluster", clusterName))
11391174

1140-
if err := checkHeadlampBackendToken(w, r); err != nil {
1175+
if err := c.checkHeadlampBackendToken(w, r); err != nil {
11411176
c.handleError(w, ctx, span, err, "failed to check headlamp backend token", http.StatusForbidden)
11421177

11431178
return
@@ -1399,6 +1434,7 @@ func (c *HeadlampConfig) handleClusterRequests(router *mux.Router) {
13991434
handleClusterHelm(c, router)
14001435
}
14011436

1437+
handleClusterServiceProxy(c, router)
14021438
handleClusterAPI(c, router)
14031439
}
14041440

@@ -1569,7 +1605,7 @@ func (c *HeadlampConfig) addCluster(w http.ResponseWriter, r *http.Request) {
15691605
defer recordRequestCompletion(c, ctx, start, r)
15701606
c.telemetryHandler.RecordRequestCount(ctx, r)
15711607

1572-
if err := checkHeadlampBackendToken(w, r); err != nil {
1608+
if err := c.checkHeadlampBackendToken(w, r); err != nil {
15731609
c.telemetryHandler.RecordError(span, err, "invalid backend token")
15741610
c.telemetryHandler.RecordErrorCount(ctx, attribute.String("error.type", "invalid token"))
15751611
logger.Log(logger.LevelError, nil, err, "invalid token")
@@ -1777,7 +1813,7 @@ func (c *HeadlampConfig) deleteCluster(w http.ResponseWriter, r *http.Request) {
17771813

17781814
name := mux.Vars(r)["name"]
17791815

1780-
if err := checkHeadlampBackendToken(w, r); err != nil {
1816+
if err := c.checkHeadlampBackendToken(w, r); err != nil {
17811817
c.telemetryHandler.RecordError(span, err, "invalid backend token")
17821818
c.telemetryHandler.RecordErrorCount(ctx, attribute.String("error.type", "invalid_token"))
17831819
logger.Log(logger.LevelError, nil, err, "invalid token")

backend/cmd/headlamp_test.go

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,12 +25,14 @@ import (
2525
"encoding/json"
2626
"fmt"
2727
"io"
28+
"net"
2829
"net/http"
2930
"net/http/httptest"
3031
"net/url"
3132
"os"
3233
"path/filepath"
3334
"strconv"
35+
"strings"
3436
"testing"
3537
"time"
3638

@@ -43,6 +45,8 @@ import (
4345
"github.com/kubernetes-sigs/headlamp/backend/pkg/telemetry"
4446
"github.com/stretchr/testify/assert"
4547
"github.com/stretchr/testify/require"
48+
corev1 "k8s.io/api/core/v1"
49+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
4650
"k8s.io/client-go/tools/clientcmd"
4751
"k8s.io/client-go/tools/clientcmd/api"
4852
)
@@ -1555,3 +1559,123 @@ func TestCacheMiddleware_CacheInvalidation(t *testing.T) {
15551559
assert.Equal(t, "true", resp1.Header.Get("X-HEADLAMP-CACHE"))
15561560
assert.Equal(t, http.StatusOK, resp1.StatusCode)
15571561
}
1562+
1563+
//nolint:funlen
1564+
func TestHandleClusterServiceProxy(t *testing.T) {
1565+
cfg := &HeadlampConfig{
1566+
HeadlampCFG: &headlampconfig.HeadlampCFG{KubeConfigStore: kubeconfig.NewContextStore()},
1567+
telemetryHandler: &telemetry.RequestHandler{},
1568+
telemetryConfig: GetDefaultTestTelemetryConfig(),
1569+
}
1570+
1571+
// Backend service the proxy should call
1572+
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
1573+
if r.URL.Path == "/healthz" {
1574+
w.WriteHeader(http.StatusOK)
1575+
_, _ = w.Write([]byte("OK"))
1576+
1577+
return
1578+
}
1579+
1580+
http.NotFound(w, r)
1581+
}))
1582+
t.Cleanup(backend.Close)
1583+
1584+
// Extract host:port to feed into the Service external name + port
1585+
bu, err := url.Parse(backend.URL)
1586+
require.NoError(t, err)
1587+
host, portStr, err := net.SplitHostPort(strings.TrimPrefix(bu.Host, "["))
1588+
require.NoError(t, err)
1589+
portNum, err := strconv.Atoi(strings.TrimSuffix(portStr, "]"))
1590+
require.NoError(t, err)
1591+
1592+
// Fake k8s API that returns a Service pointing to backend
1593+
kubeAPI := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
1594+
if r.Method == http.MethodGet && r.URL.Path == "/api/v1/namespaces/default/services/my-service" {
1595+
svc := &corev1.Service{
1596+
ObjectMeta: metav1.ObjectMeta{
1597+
Name: "my-service",
1598+
Namespace: "default",
1599+
},
1600+
Spec: corev1.ServiceSpec{
1601+
ExternalName: host,
1602+
Ports: []corev1.ServicePort{
1603+
{
1604+
Name: "http",
1605+
Port: int32(portNum), //nolint:gosec
1606+
},
1607+
},
1608+
},
1609+
}
1610+
1611+
w.Header().Set("Content-Type", "application/json")
1612+
w.WriteHeader(http.StatusOK)
1613+
_ = json.NewEncoder(w).Encode(svc)
1614+
1615+
return
1616+
}
1617+
1618+
http.NotFound(w, r)
1619+
}))
1620+
t.Cleanup(kubeAPI.Close)
1621+
1622+
// Add a context that matches clusterName in URL
1623+
err = cfg.KubeConfigStore.AddContext(&kubeconfig.Context{
1624+
Name: "kubernetes",
1625+
KubeContext: &api.Context{
1626+
Cluster: "kubernetes",
1627+
AuthInfo: "kubernetes",
1628+
},
1629+
Cluster: &api.Cluster{Server: kubeAPI.URL}, // client-go will talk to this
1630+
AuthInfo: &api.AuthInfo{},
1631+
})
1632+
require.NoError(t, err)
1633+
1634+
router := mux.NewRouter()
1635+
handleClusterServiceProxy(cfg, router)
1636+
1637+
cluster := "kubernetes"
1638+
ns := "default"
1639+
svc := "my-service"
1640+
1641+
// Case 1: Missing ?request => route doesn't match => 404, no headers set
1642+
{
1643+
req := httptest.NewRequest(http.MethodGet,
1644+
"/clusters/"+cluster+"/serviceproxy/"+ns+"/"+svc, nil)
1645+
rr := httptest.NewRecorder()
1646+
router.ServeHTTP(rr, req)
1647+
assert.Equal(t, http.StatusNotFound, rr.Code)
1648+
assert.Empty(t, rr.Header().Get("Cache-Control"))
1649+
}
1650+
1651+
// Case 2: ?request present but missing Authorization => 401, headers set
1652+
{
1653+
req := httptest.NewRequest(http.MethodGet,
1654+
"/clusters/"+cluster+"/serviceproxy/"+ns+"/"+svc+"?request=/healthz", nil)
1655+
rr := httptest.NewRecorder()
1656+
router.ServeHTTP(rr, req)
1657+
assert.Equal(t, http.StatusUnauthorized, rr.Code)
1658+
assert.Equal(t, "no-cache, private, max-age=0", rr.Header().Get("Cache-Control"))
1659+
assert.Equal(t, "no-cache", rr.Header().Get("Pragma"))
1660+
assert.Equal(t, "0", rr.Header().Get("X-Accel-Expires"))
1661+
}
1662+
1663+
// Case 3 (Happy path): ?request present and Authorization provided => proxy reaches backend => 200 OK
1664+
{
1665+
req := httptest.NewRequest(http.MethodGet,
1666+
"/clusters/"+cluster+"/serviceproxy/"+ns+"/"+svc+"?request=/healthz", nil)
1667+
req.Header.Set("Authorization", "Bearer test-token")
1668+
1669+
rr := httptest.NewRecorder()
1670+
router.ServeHTTP(rr, req)
1671+
1672+
// Handler always sets no-cache headers
1673+
assert.Equal(t, "no-cache, private, max-age=0", rr.Header().Get("Cache-Control"))
1674+
assert.Equal(t, "no-cache", rr.Header().Get("Pragma"))
1675+
assert.Equal(t, "0", rr.Header().Get("X-Accel-Expires"))
1676+
1677+
// Happy path: backend returns OK
1678+
assert.Equal(t, http.StatusOK, rr.Code)
1679+
assert.Equal(t, "OK", rr.Body.String())
1680+
}
1681+
}

backend/pkg/config/config.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -335,7 +335,6 @@ func flagset() *flag.FlagSet {
335335
f.String("listen-addr", "", "Address to listen on; default is empty, which means listening to any address")
336336
f.Uint("port", defaultPort, "Port to listen from")
337337
f.String("proxy-urls", "", "Allow proxy requests to specified URLs")
338-
f.Bool("enable-helm", false, "Enable Helm operations")
339338

340339
f.String("oidc-client-id", "", "ClientID for OIDC")
341340
f.String("oidc-client-secret", "", "ClientSecret for OIDC")

backend/pkg/config/config_test.go

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -178,13 +178,6 @@ func TestParseFlags(t *testing.T) {
178178
assert.Equal(t, filepath.Join(getTestDataPath(), "valid_ca.pem"), conf.OidcCAFile)
179179
},
180180
},
181-
{
182-
name: "enable_helm",
183-
args: []string{"go run ./cmd", "--enable-helm"},
184-
verify: func(t *testing.T, conf *config.Config) {
185-
assert.Equal(t, true, conf.EnableHelm)
186-
},
187-
},
188181
}
189182

190183
for _, tt := range tests {

backend/pkg/helm/release.go

Lines changed: 42 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ limitations under the License.
1717
package helm
1818

1919
import (
20+
"context"
2021
"encoding/base64"
2122
"encoding/json"
2223
"errors"
@@ -36,6 +37,9 @@ import (
3637
"helm.sh/helm/v3/pkg/getter"
3738
"helm.sh/helm/v3/pkg/release"
3839
"helm.sh/helm/v3/pkg/storage/driver"
40+
authv1 "k8s.io/api/authentication/v1"
41+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
42+
"k8s.io/client-go/kubernetes"
3943
"sigs.k8s.io/yaml"
4044
)
4145

@@ -576,15 +580,49 @@ func (h *Handler) getChart(
576580
return chart, nil
577581
}
578582

583+
// Verify the user has minimal privileges by performing a whoami check.
584+
// This prevents spurious downloads by ensuring basic authentication before proceeding.
585+
func VerifyUser(h *Handler, req InstallRequest) bool {
586+
restConfig, err := h.Configuration.RESTClientGetter.ToRESTConfig()
587+
if err != nil {
588+
logger.Log(logger.LevelError, map[string]string{"chart": req.Chart, "releaseName": req.Name}, err, "getting chart")
589+
return false
590+
}
591+
592+
cs, err := kubernetes.NewForConfig(restConfig)
593+
if err != nil {
594+
logger.Log(logger.LevelError, map[string]string{"chart": req.Chart, "releaseName": req.Name}, err, "getting chart")
595+
return false
596+
}
597+
598+
review, err := cs.AuthenticationV1().SelfSubjectReviews().Create(context.Background(),
599+
&authv1.SelfSubjectReview{}, metav1.CreateOptions{})
600+
if err != nil {
601+
logger.Log(logger.LevelError, map[string]string{"chart": req.Chart, "releaseName": req.Name}, err, "getting chart")
602+
return false
603+
}
604+
605+
if user := review.Status.UserInfo.Username; user == "" || user == "system:anonymous" {
606+
logger.Log(logger.LevelError, map[string]string{"chart": req.Chart, "releaseName": req.Name},
607+
errors.New("insufficient privileges"), "getting chart: user is not authorized to perform this operation")
608+
return false
609+
}
610+
611+
return true
612+
}
613+
579614
func (h *Handler) installRelease(req InstallRequest) {
580-
// Get install client
581615
installClient := action.NewInstall(h.Configuration)
582616
installClient.ReleaseName = req.Name
583617
installClient.Namespace = req.Namespace
584618
installClient.Description = req.Description
585619
installClient.CreateNamespace = req.CreateNamespace
586620
installClient.ChartPathOptions.Version = req.Version
587621

622+
if !VerifyUser(h, req) {
623+
return
624+
}
625+
588626
chart, err := h.getChart("install", req.Chart, req.Name,
589627
installClient.ChartPathOptions, req.DependencyUpdate, h.EnvSettings)
590628
if err != nil {
@@ -594,8 +632,6 @@ func (h *Handler) installRelease(req InstallRequest) {
594632
return
595633
}
596634

597-
values := make(map[string]interface{})
598-
599635
decodedBytes, err := base64.StdEncoding.DecodeString(req.Values)
600636
if err != nil {
601637
logger.Log(logger.LevelError, map[string]string{"chart": req.Chart, "releaseName": req.Name},
@@ -605,28 +641,23 @@ func (h *Handler) installRelease(req InstallRequest) {
605641
return
606642
}
607643

608-
err = yaml.Unmarshal(decodedBytes, &values)
609-
if err != nil {
644+
values := make(map[string]interface{})
645+
if err = yaml.Unmarshal(decodedBytes, &values); err != nil {
610646
logger.Log(logger.LevelError, map[string]string{"chart": req.Chart, "releaseName": req.Name},
611647
err, "unmarshalling values")
612648
h.setReleaseStatusSilent("install", req.Name, failed, err)
613649

614650
return
615651
}
616652

617-
// Install chart
618-
_, err = installClient.Run(chart, values)
619-
if err != nil {
653+
if _, err = installClient.Run(chart, values); err != nil {
620654
logger.Log(logger.LevelError, map[string]string{"chart": req.Chart, "releaseName": req.Name},
621655
err, "installing chart")
622656
h.setReleaseStatusSilent("install", req.Name, failed, err)
623657

624658
return
625659
}
626660

627-
logger.Log(logger.LevelInfo, map[string]string{"chart": req.Chart, "releaseName": req.Name},
628-
nil, "chart installed successfully")
629-
630661
h.setReleaseStatusSilent("install", req.Name, success, nil)
631662
}
632663

0 commit comments

Comments
 (0)